the-stack_0_7453 | from application import app, db
from flask import redirect, render_template, request, url_for
from application.visits.models import Visit
from application.yhteenveto.forms import InYearForm, InMonthForm
from flask_login.utils import login_required, current_user
from application.sivu.models import Sivu
from sqlalchemy.sql import text

# Summary ("yhteenveto") views: aggregated visit statistics per page, page
# group, browser and visitor for the currently logged-in account.


@app.route("/yhteenveto/alku/", methods=["GET"])
@login_required
def yhteenveto_alku():
    return render_template("yhteenveto/valinta.html", title="Yhteenvedot")


@app.route("/yhteenveto/vuodessa/", methods=["GET", "POST"])
@login_required
def yhteenveto_vuodessa():
    if request.method == 'POST':
        form = InYearForm(request.form)
        stmt = text("SELECT sivu.osoite, SUM(visit.lukumaara) AS maara FROM sivu, visit WHERE visit.vuosi = :vuosi AND visit.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.osoite").params(vuosi=form.year.data, id=current_user.id)
        result = db.engine.execute(stmt)
        return render_template("yhteenveto/vuodessa.html", title="Käyntejä sivuilla vuodessa", vuosi=result)
    else:
        return render_template("yhteenveto/kyselyvuodessa.html", title="Käyntejä sivuilla vuodessa", form=InYearForm())


@app.route("/yhteenveto/ryhma/", methods=["GET", "POST"])
@login_required
def yhteenveto_ryhmatulos():
    if request.method == 'POST':
        form = InMonthForm(request.form)
        stmt = text("SELECT sivu.ryhma AS ryhma, SUM(visit.lukumaara) AS maara FROM sivu, visit WHERE visit.vuosi = :vuosi AND visit.kuukausi = :kuukausi AND visit.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.ryhma").params(vuosi=form.year.data, kuukausi=form.month.data, id=current_user.id)
        result = db.engine.execute(stmt)
        return render_template("yhteenveto/ryhmassa.html", title="Käyntejä sivuryhmissä vuodessa", vuosi=result)
    else:
        return render_template("yhteenveto/kyselyryhmassa.html", title="Vuoden tilasto", form=InMonthForm())


@app.route("/yhteenveto/selaimia/", methods=["GET", "POST"])
@login_required
def yhteenveto_selaimia():
    if request.method == 'POST':
        form = InYearForm(request.form)
        stmt = text("SELECT selain.selain AS nimi, SUM(selain.kaynnit) AS maara FROM sivu, selain, kavijat WHERE selain.kavijat_id = kavijat.id AND kavijat.vuosi = :vuosi AND kavijat.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY selain.selain").params(vuosi=form.year.data, id=current_user.id)
        result = db.engine.execute(stmt)
        return render_template("yhteenveto/selaimia.html", title="Selaimien yhteenveto", selaimet=result)
    else:
        return render_template("yhteenveto/selainvuosi.html", title="Vuoden tilasto", form=InYearForm())


@app.route("/yhteenveto/kavijoita/", methods=["GET", "POST"])
@login_required
def yhteenveto_kavijoita():
    if request.method == 'POST':
        form = InYearForm(request.form)
        stmt = text("SELECT sivu.osoite, SUM(kavijat.kaynnit) AS maara FROM sivu, kavijat WHERE kavijat.vuosi = :vuosi AND kavijat.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.osoite").params(vuosi=form.year.data, id=current_user.id)
        result = db.engine.execute(stmt)
        return render_template("yhteenveto/kavijoita.html", title="Kavijoita sivuilla vuodessa", kavijat=result)
    else:
        return render_template("yhteenveto/kavijavuosi.html", title="Vuoden tilasto", form=InYearForm())
|
the-stack_0_7456 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="keras-grid-search-cacheable",
version="1.0.0",
author="Daniel Espinosa",
author_email="[email protected]",
description="Reducción de tiempo de ejecución de los algoritmos de Machine Learning con búsqueda de parámetros en GridSearch.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/machine-learning-tools/keras-grid-search-cacheable",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6.9',
install_requires=[
'tensorflow>=1.15.0'
]
)
|
the-stack_0_7457 | # $Id: __init__.py 7661 2013-05-07 10:52:59Z milde $
# Author: David Goodger
# Maintainer: [email protected]
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the XHTML version 1.0 Transitional DTD
(*almost* strict). The output contains a minimum of formatting
information. The cascading style sheet "html4css1.css" is required
for proper viewing with a modern graphical browser.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import time
import re
import urllib.request, urllib.parse, urllib.error
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
import docutils
from docutils import frontend, nodes, utils, writers, languages, io
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import unichar2tex, pick_math_environment, math2html
from docutils.utils.math.latex2mathml import parse_latex_math
class Writer(writers.Writer):
supported = ('html', 'html4css1', 'xhtml')
"""Formats this writer supports."""
default_stylesheet = 'html4css1.css'
default_stylesheet_dirs = ['.', utils.relative_path(
os.path.join(os.getcwd(), 'dummy'), os.path.dirname(__file__))]
default_template = 'template.txt'
default_template_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_template))
settings_spec = (
'HTML-Specific Options',
None,
(('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL[,URL,...]>', 'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % default_stylesheet,
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': [default_stylesheet]}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "%s"' % default_stylesheet_dirs,
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Specify the maximum width (in characters) for one-column field '
'names. Longer field names will span an entire row of the table '
'used to render the field list. Default is 14 characters. '
'Use 0 for "no limit".',
['--field-name-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Specify the maximum width (in characters) for options in option '
'lists. Longer options will span an entire row of the table used '
'to render the option list. Default is 14 characters. '
'Use 0 for "no limit".',
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: "borderless". Default: ""',
['--table-style'],
{'default': ''}),
('Math output format, one of "MathML", "HTML", "MathJax" '
'or "LaTeX". Default: "HTML math.css"',
['--math-output'],
{'default': 'HTML math.css'}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'html4css1 writer'
config_section_dependencies = ('writers',)
visitor_attributes = (
'head_prefix', 'head', 'stylesheet', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'body_suffix',
'title', 'subtitle', 'header', 'footer', 'meta', 'fragment',
'html_prolog', 'html_head', 'html_title', 'html_subtitle',
'html_body')
def get_transforms(self):
return writers.Writer.get_transforms(self) + [writer_aux.Admonitions]
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = HTMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
for attr in self.visitor_attributes:
setattr(self, attr, getattr(visitor, attr))
self.output = self.apply_template()
def apply_template(self):
template_file = open(self.document.settings.template, 'rb')
template = str(template_file.read(), 'utf-8')
template_file.close()
subs = self.interpolation_dict()
return template % subs
def interpolation_dict(self):
subs = {}
settings = self.document.settings
for attr in self.visitor_attributes:
subs[attr] = ''.join(getattr(self, attr)).rstrip('\n')
subs['encoding'] = settings.output_encoding
subs['version'] = docutils.__version__
return subs
def assemble_parts(self):
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
self.parts[part] = ''.join(getattr(self, part))
class HTMLTranslator(nodes.NodeVisitor):
"""
This HTML writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in browsers (although they really
shouldn't).
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
xml_declaration = '<?xml version="1.0" encoding="%s" ?>\n'
doctype = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
doctype_mathml = doctype
head_prefix_template = ('<html xmlns="http://www.w3.org/1999/xhtml"'
' xml:lang="%(lang)s" lang="%(lang)s">\n<head>\n')
content_type = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
content_type_mathml = ('<meta http-equiv="Content-Type"'
' content="application/xhtml+xml; charset=%s" />\n')
generator = ('<meta name="generator" content="Docutils %s: '
'http://docutils.sourceforge.net/" />\n')
# Template for the MathJax script in the header:
mathjax_script = '<script type="text/javascript" src="%s"></script>\n'
# The latest version of MathJax from the distributed server:
# available to the public under the `MathJax CDN Terms of Service`__
# __http://www.mathjax.org/download/mathjax-cdn-terms-of-service/
mathjax_url = ('http://cdn.mathjax.org/mathjax/latest/MathJax.js?'
'config=TeX-AMS-MML_HTMLorMML')
# may be overwritten by custom URL appended to "mathjax"
stylesheet_link = '<link rel="stylesheet" href="%s" type="text/css" />\n'
embedded_stylesheet = '<style type="text/css">\n\n%s\n</style>\n'
words_and_spaces = re.compile(r'\S+| +|\n')
sollbruchstelle = re.compile(r'.+\W\W.+|[-?].+', re.U) # wrap point inside word
lang_attribute = 'lang' # name changes to 'xml:lang' in XHTML 1.1
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode, document.reporter)
self.meta = [self.generator % docutils.__version__]
self.head_prefix = []
self.html_prolog = []
if settings.xml_declaration:
self.head_prefix.append(self.xml_declaration
% settings.output_encoding)
# encoding not interpolated:
self.html_prolog.append(self.xml_declaration)
self.head = self.meta[:]
self.stylesheet = [self.stylesheet_call(path)
for path in utils.get_stylesheet_list(settings)]
self.body_prefix = ['</head>\n<body>\n']
# document title, subtitle display
self.body_pre_docinfo = []
# author, date, etc.
self.docinfo = []
self.body = []
self.fragment = []
self.body_suffix = ['</body>\n</html>\n']
self.section_level = 0
self.initial_header_level = int(settings.initial_header_level)
self.math_output = settings.math_output.split()
self.math_output_options = self.math_output[1:]
self.math_output = self.math_output[0].lower()
# A heterogenous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
self.topic_classes = []
self.colspecs = []
self.compact_p = True
self.compact_simple = False
self.compact_field_list = False
self.in_docinfo = False
self.in_sidebar = False
self.title = []
self.subtitle = []
self.header = []
self.footer = []
self.html_head = [self.content_type] # charset not interpolated
self.html_title = []
self.html_subtitle = []
self.html_body = []
self.in_document_title = 0 # len(self.body) or 0
self.in_mailto = False
self.author_in_authors = False
self.math_header = []
def astext(self):
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
+ self.body_pre_docinfo + self.docinfo
+ self.body + self.body_suffix)
def encode(self, text):
"""Encode special characters in `text` & return."""
# @@@ A codec to do these and all other HTML entities would be nice.
text = str(text)
return text.translate({
ord('&'): '&amp;',
ord('<'): '&lt;',
ord('"'): '&quot;',
ord('>'): '&gt;',
ord('@'): '&#64;', # may thwart some address harvesters
# TODO: convert non-breaking space only if needed?
0xa0: '&nbsp;'}) # non-breaking space
def cloak_mailto(self, uri):
"""Try to hide a mailto: URL from harvesters."""
# Encode "@" using a URL octet reference (see RFC 1738).
# Further cloaking with HTML entities will be done in the
# `attval` function.
return uri.replace('@', '%40')
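# For example (illustrative address, not from the source): cloak_mailto(
# 'mailto:user@example.org') returns 'mailto:user%40example.org', and
# cloak_email() below further splits the visible link text across <span>
# elements so harvesters cannot match it as one token.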
def cloak_email(self, addr):
"""Try to hide the link text of an email link from harvesters."""
# Surround at-signs and periods with <span> tags. ("@" has
# already been encoded to "&#64;" by the `encode` method.)
addr = addr.replace('&#64;', '<span>&#64;</span>')
addr = addr.replace('.', '<span>&#46;</span>')
return addr
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, HTML encode, and return attribute value text."""
encoded = self.encode(whitespace.sub(' ', text))
if self.in_mailto and self.settings.cloak_email_addresses:
# Cloak at-signs ("%40") and periods with HTML entities.
encoded = encoded.replace('%40', '&#37;&#52;&#48;')
encoded = encoded.replace('.', '&#46;')
return encoded
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
if self.settings.embed_stylesheet:
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError as err:
msg = "Cannot embed stylesheet '%s': %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '<--- %s --->\n' % msg
return self.embedded_stylesheet % content
# else link to style file:
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return self.stylesheet_link % self.encode(path)
def starttag(self, node, tagname, suffix='\n', empty=False, **attributes):
"""
Construct and return a start tag given a node (id & class attributes
are extracted), tag name, and optional attributes.
"""
tagname = tagname.lower()
prefix = []
atts = {}
ids = []
for (name, value) in list(attributes.items()):
atts[name.lower()] = value
classes = []
languages = []
# unify class arguments and move language specification
for cls in node.get('classes', []) + atts.pop('class', '').split() :
if cls.startswith('language-'):
languages.append(cls[9:])
elif cls.strip() and cls not in classes:
classes.append(cls)
if languages:
# attribute name is 'lang' in XHTML 1.0 but 'xml:lang' in 1.1
atts[self.lang_attribute] = languages[0]
if classes:
atts['class'] = ' '.join(classes)
assert 'id' not in atts
ids.extend(node.get('ids', []))
if 'ids' in atts:
ids.extend(atts['ids'])
del atts['ids']
if ids:
atts['id'] = ids[0]
for id in ids[1:]:
# Add empty "span" elements for additional IDs. Note
# that we cannot use empty "a" elements because there
# may be targets inside of references, but nested "a"
# elements aren't allowed in XHTML (even if they do
# not all have a "href" attribute).
if empty:
# Empty tag. Insert target right in front of element.
prefix.append('<span id="%s"></span>' % id)
else:
# Non-empty tag. Place the auxiliary <span> tag
# *inside* the element, as the first child.
suffix += '<span id="%s"></span>' % id
attlist = list(atts.items())
attlist.sort()
parts = [tagname]
for name, value in attlist:
# value=None was used for boolean attributes without
# value, but this isn't supported by XHTML.
assert value is not None
if isinstance(value, list):
values = [str(v) for v in value]
parts.append('%s="%s"' % (name.lower(),
self.attval(' '.join(values))))
else:
parts.append('%s="%s"' % (name.lower(),
self.attval(str(value))))
if empty:
infix = ' /'
else:
infix = ''
return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix
def emptytag(self, node, tagname, suffix='\n', **attributes):
"""Construct and return an XML-compatible empty tag."""
return self.starttag(node, tagname, suffix, empty=True, **attributes)
def set_class_on_child(self, node, class_, index=0):
"""
Set class `class_` on the visible child no. index of `node`.
Do nothing if node has fewer children than `index`.
"""
children = [n for n in node if not isinstance(n, nodes.Invisible)]
try:
child = children[index]
except IndexError:
return
child['classes'].append(class_)
def set_first_last(self, node):
self.set_class_on_child(node, 'first', 0)
self.set_class_on_child(node, 'last', -1)
def visit_Text(self, node):
text = node.astext()
encoded = self.encode(text)
if self.in_mailto and self.settings.cloak_email_addresses:
encoded = self.cloak_email(encoded)
self.body.append(encoded)
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'abbr', ''))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_acronym(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'acronym', ''))
def depart_acronym(self, node):
self.body.append('</acronym>')
def visit_address(self, node):
self.visit_docinfo_item(node, 'address', meta=False)
self.body.append(self.starttag(node, 'pre', CLASS='address'))
def depart_address(self, node):
self.body.append('\n</pre>\n')
self.depart_docinfo_item()
def visit_admonition(self, node):
self.body.append(self.starttag(node, 'div'))
self.set_first_last(node)
def depart_admonition(self, node=None):
self.body.append('</div>\n')
attribution_formats = {'dash': ('&mdash;', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.context.append(suffix)
self.body.append(
self.starttag(node, 'p', prefix, CLASS='attribution'))
def depart_attribution(self, node):
self.body.append(self.context.pop() + '</p>\n')
def visit_author(self, node):
if isinstance(node.parent, nodes.authors):
if self.author_in_authors:
self.body.append('\n<br />')
else:
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
if isinstance(node.parent, nodes.authors):
self.author_in_authors = True
else:
self.depart_docinfo_item()
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors')
self.author_in_authors = False # initialize
def depart_authors(self, node):
self.depart_docinfo_item()
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append('</blockquote>\n')
def check_simple_list(self, node):
"""Check for a simple list that can be rendered compactly."""
visitor = SimpleListChecker(self.document)
try:
node.walk(visitor)
except nodes.NodeFound:
return None
else:
return 1
def is_compactable(self, node):
return ('compact' in node['classes']
or (self.settings.compact_lists
and 'open' not in node['classes']
and (self.compact_simple
or self.topic_classes == ['contents']
or self.check_simple_list(node))))
def visit_bullet_list(self, node):
atts = {}
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = 'simple'
self.body.append(self.starttag(node, 'ul', **atts))
def depart_bullet_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ul>\n')
def visit_caption(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
def depart_caption(self, node):
self.body.append('</p>\n')
def visit_citation(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_citation_reference(self, node):
href = '#'
if 'refid' in node:
href += node['refid']
elif 'refname' in node:
href += self.document.nameids[node['refname']]
# else: # TODO system message (or already in the transform)?
# 'Citation reference missing.'
self.body.append(self.starttag(
node, 'a', '[', CLASS='citation-reference', href=href))
def depart_citation_reference(self, node):
self.body.append(']</a>')
def visit_classifier(self, node):
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
def depart_classifier(self, node):
self.body.append('</span>')
def visit_colspec(self, node):
self.colspecs.append(node)
# "stubs" list is an attribute of the tgroup element:
node.parent.stubs.append(node.attributes.get('stub'))
def depart_colspec(self, node):
pass
def write_colspecs(self):
width = 0
for node in self.colspecs:
width += node['colwidth']
for node in self.colspecs:
colwidth = int(node['colwidth'] * 100.0 / width + 0.5)
self.body.append(self.emptytag(node, 'col',
width='%i%%' % colwidth))
self.colspecs = []
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
"""Escape double-dashes in comment text."""
self.body.append('<!-- %s -->\n' % sub('- ', node.astext()))
# Content already processed:
raise nodes.SkipNode
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div', CLASS='compound'))
if len(node) > 1:
node[0]['classes'].append('compound-first')
node[-1]['classes'].append('compound-last')
for child in node[1:-1]:
child['classes'].append('compound-middle')
def depart_compound(self, node):
self.body.append('</div>\n')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div', CLASS='container'))
def depart_container(self, node):
self.body.append('</div>\n')
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact', meta=False)
def depart_contact(self, node):
self.depart_docinfo_item()
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item()
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item()
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
self.set_first_last(node)
def depart_definition(self, node):
self.body.append('</dd>\n')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
def depart_definition_list(self, node):
self.body.append('</dl>\n')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
def visit_docinfo(self, node):
self.context.append(len(self.body))
self.body.append(self.starttag(node, 'table',
CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name" />\n'
'<col class="docinfo-content" />\n'
'<tbody valign="top">\n')
self.in_docinfo = True
def depart_docinfo(self, node):
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = False
start = self.context.pop()
self.docinfo = self.body[start:]
self.body = []
def visit_docinfo_item(self, node, name, meta=True):
if meta:
meta_tag = '<meta name="%s" content="%s" />\n' \
% (name, self.attval(node.astext()))
self.add_meta(meta_tag)
self.body.append(self.starttag(node, 'tr', ''))
self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
% self.language.labels[name])
if len(node):
if isinstance(node[0], nodes.Element):
node[0]['classes'].append('first')
if isinstance(node[-1], nodes.Element):
node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
def visit_doctest_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
def depart_doctest_block(self, node):
self.body.append('\n</pre>\n')
def visit_document(self, node):
self.head.append('<title>%s</title>\n'
% self.encode(node.get('title', '')))
def depart_document(self, node):
self.head_prefix.extend([self.doctype,
self.head_prefix_template %
{'lang': self.settings.language_code}])
self.html_prolog.append(self.doctype)
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
if self.math_header:
if self.math_output == 'mathjax':
self.head.extend(self.math_header)
else:
self.stylesheet.extend(self.math_header)
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.body_prefix.append(self.starttag(node, 'div', CLASS='document'))
self.body_suffix.insert(0, '</div>\n')
self.fragment.extend(self.body) # self.fragment is the "naked" body
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
assert not self.context, 'len(context) = %s' % len(self.context)
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em', ''))
def depart_emphasis(self, node):
self.body.append('</em>')
def visit_entry(self, node):
atts = {'class': []}
if isinstance(node.parent.parent, nodes.thead):
atts['class'].append('head')
if node.parent.parent.parent.stubs[node.parent.column]:
# "stubs" list is an attribute of the tgroup element
atts['class'].append('stub')
if atts['class']:
tagname = 'th'
atts['class'] = ' '.join(atts['class'])
else:
tagname = 'td'
del atts['class']
node.parent.column += 1
if 'morerows' in node:
atts['rowspan'] = node['morerows'] + 1
if 'morecols' in node:
atts['colspan'] = node['morecols'] + 1
node.parent.column += node['morecols']
self.body.append(self.starttag(node, tagname, '', **atts))
self.context.append('</%s>\n' % tagname.lower())
if len(node) == 0: # empty cell
self.body.append('&nbsp;')
self.set_first_last(node)
def depart_entry(self, node):
self.body.append(self.context.pop())
def visit_enumerated_list(self, node):
"""
The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
CSS1 doesn't help. CSS2 isn't widely enough supported yet to be
usable.
"""
atts = {}
if 'start' in node:
atts['start'] = node['start']
if 'enumtype' in node:
atts['class'] = node['enumtype']
# @@@ To do: prefix, suffix. How? Change prefix/suffix to a
# single "format" attribute? Use CSS2?
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
self.body.append(self.starttag(node, 'ol', **atts))
def depart_enumerated_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ol>\n')
def visit_field(self, node):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def depart_field(self, node):
self.body.append('</tr>\n')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
self.set_class_on_child(node, 'first', 0)
field = node.parent
if (self.compact_field_list or
isinstance(field.parent, nodes.docinfo) or
field.parent.index(field) == len(field.parent) - 1):
# If we are in a compact list, the docinfo, or if this is
# the last field of the field list, do not add vertical
# space after last element.
self.set_class_on_child(node, 'last', -1)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
self.context.append((self.compact_field_list, self.compact_p))
self.compact_p = None
if 'compact' in node['classes']:
self.compact_field_list = True
elif (self.settings.compact_field_lists
and 'open' not in node['classes']):
self.compact_field_list = True
if self.compact_field_list:
for field in node:
field_body = field[-1]
assert isinstance(field_body, nodes.field_body)
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
if not (len(children) == 0 or
len(children) == 1 and
isinstance(children[0],
(nodes.paragraph, nodes.line_block))):
self.compact_field_list = False
break
self.body.append(self.starttag(node, 'table', frame='void',
rules='none',
CLASS='docutils field-list'))
self.body.append('<col class="field-name" />\n'
'<col class="field-body" />\n'
'<tbody valign="top">\n')
def depart_field_list(self, node):
self.body.append('</tbody>\n</table>\n')
self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n'
+ self.starttag(node.parent, 'tr', '',
CLASS='field')
+ '<td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
def depart_field_name(self, node):
self.body.append(':</th>')
self.body.append(self.context.pop())
def visit_figure(self, node):
atts = {'class': 'figure'}
if node.get('width'):
atts['style'] = 'width: %s' % node['width']
if node.get('align'):
atts['class'] += " align-" + node['align']
self.body.append(self.starttag(node, 'div', **atts))
def depart_figure(self, node):
self.body.append('</div>\n')
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = [self.starttag(node, 'div', CLASS='footer'),
'<hr class="footer" />\n']
footer.extend(self.body[start:])
footer.append('\n</div>\n')
self.footer.extend(footer)
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
backlinks = []
backrefs = node['backrefs']
if self.settings.footnote_backlinks and backrefs:
if len(backrefs) == 1:
self.context.append('')
self.context.append('</a>')
self.context.append('<a class="fn-backref" href="#%s">'
% backrefs[0])
else:
i = 1
for backref in backrefs:
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i))
i += 1
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
self.context += ['', '']
else:
self.context.append('')
self.context += ['', '']
# If the node does not only consist of a label.
if len(node) > 1:
# If there are preceding backlinks, we do not set class
# 'first', because we need to retain the top-margin.
if not backlinks:
node[1]['classes'].append('first')
node[-1]['classes'].append('last')
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
else:
assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
self.body.append(self.starttag(node, 'a', suffix,
CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
header = [self.starttag(node, 'div', CLASS='header')]
header.extend(self.body[start:])
header.append('\n<hr class="header"/>\n</div>\n')
self.body_prefix.extend(header)
self.header.extend(header)
del self.body[start:]
def visit_image(self, node):
atts = {}
uri = node['uri']
# place SVG and SWF images in an <object> element
types = {'.svg': 'image/svg+xml',
'.swf': 'application/x-shockwave-flash'}
ext = os.path.splitext(uri)[1].lower()
if ext in ('.svg', '.swf'):
atts['data'] = uri
atts['type'] = types[ext]
else:
atts['src'] = uri
atts['alt'] = node.get('alt', uri)
# image size
if 'width' in node:
atts['width'] = node['width']
if 'height' in node:
atts['height'] = node['height']
if 'scale' in node:
if (PIL and not ('width' in node and 'height' in node)
and self.settings.file_insertion_enabled):
imagepath = urllib.request.url2pathname(uri)
try:
img = PIL.Image.open(
imagepath.encode(sys.getfilesystemencoding()))
except (IOError, UnicodeEncodeError):
pass # TODO: warn?
else:
self.settings.record_dependencies.add(
imagepath.replace('\\', '/'))
if 'width' not in atts:
atts['width'] = str(img.size[0])
if 'height' not in atts:
atts['height'] = str(img.size[1])
del img
for att_name in 'width', 'height':
if att_name in atts:
match = re.match(r'([0-9.]+)(\S*)$', atts[att_name])
assert match
atts[att_name] = '%s%s' % (
float(match.group(1)) * (float(node['scale']) / 100),
match.group(2))
style = []
for att_name in 'width', 'height':
if att_name in atts:
if re.match(r'^[0-9.]+$', atts[att_name]):
# Interpret unitless values as pixels.
atts[att_name] += 'px'
style.append('%s: %s;' % (att_name, atts[att_name]))
del atts[att_name]
if style:
atts['style'] = ' '.join(style)
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
# Inline context or surrounded by <a>...</a>.
suffix = ''
else:
suffix = '\n'
if 'align' in node:
atts['class'] = 'align-%s' % node['align']
self.context.append('')
if ext in ('.svg', '.swf'): # place in an object element,
# do NOT use an empty tag: incorrect rendering in browsers
self.body.append(self.starttag(node, 'object', suffix, **atts) +
node.get('alt', uri) + '</object>' + suffix)
else:
self.body.append(self.emptytag(node, 'img', suffix, **atts))
def depart_image(self, node):
self.body.append(self.context.pop())
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span', ''))
def depart_inline(self, node):
self.body.append('</span>')
def visit_label(self, node):
# Context added in footnote_backrefs.
self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
CLASS='label'))
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(']%s</td><td>%s' % (self.context.pop(), self.context.pop()))
def visit_legend(self, node):
self.body.append(self.starttag(node, 'div', CLASS='legend'))
def depart_legend(self, node):
self.body.append('</div>\n')
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', suffix='', CLASS='line'))
if not len(node):
self.body.append('<br />')
def depart_line(self, node):
self.body.append('</div>\n')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append('</div>\n')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li', ''))
if len(node):
node[0]['classes'].append('first')
def depart_list_item(self, node):
self.body.append('</li>\n')
def visit_literal(self, node):
# special case: "code" role
classes = node.get('classes', [])
if 'code' in classes:
# filter 'code' from class arguments
node['classes'] = [cls for cls in classes if cls != 'code']
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.sollbruchstelle.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
self.body.append('&nbsp;' * (len(token) - 1) + ' ')
self.body.append('</tt>')
# Content already processed:
raise nodes.SkipNode
def depart_literal(self, node):
# skipped unless literal element is from "code" role:
self.body.append('</code>')
def visit_literal_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))
def depart_literal_block(self, node):
self.body.append('\n</pre>\n')
def visit_math(self, node, math_env=''):
# If the method is called from visit_math_block(), math_env != ''.
# As there is no native HTML math support, we provide alternatives:
# LaTeX and MathJax math_output modes simply wrap the content,
# HTML and MathML math_output modes also convert the math_code.
if self.math_output not in ('mathml', 'html', 'mathjax', 'latex'):
self.document.reporter.error(
'math-output format "%s" not supported '
'falling back to "latex"'% self.math_output)
self.math_output = 'latex'
#
# HTML container
tags = {# math_output: (block, inline, class-arguments)
'mathml': ('div', '', ''),
'html': ('div', 'span', 'formula'),
'mathjax': ('div', 'span', 'math'),
'latex': ('pre', 'tt', 'math'),
}
tag = tags[self.math_output][math_env == '']
clsarg = tags[self.math_output][2]
# LaTeX container
wrappers = {# math_mode: (inline, block)
'mathml': (None, None),
'html': ('$%s$', '\\begin{%s}\n%s\n\\end{%s}'),
'mathjax': ('\(%s\)', '\\begin{%s}\n%s\n\\end{%s}'),
'latex': (None, None),
}
wrapper = wrappers[self.math_output][math_env != '']
# get and wrap content
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if wrapper and math_env:
math_code = wrapper % (math_env, math_code, math_env)
elif wrapper:
math_code = wrapper % math_code
# settings and conversion
if self.math_output in ('latex', 'mathjax'):
math_code = self.encode(math_code)
if self.math_output == 'mathjax' and not self.math_header:
if self.math_output_options:
self.mathjax_url = self.math_output_options[0]
self.math_header = [self.mathjax_script % self.mathjax_url]
elif self.math_output == 'html':
if self.math_output_options and not self.math_header:
self.math_header = [self.stylesheet_call(
utils.find_file_in_dirs(s, self.settings.stylesheet_dirs))
for s in self.math_output_options[0].split(',')]
# TODO: fix display mode in matrices and fractions
math2html.DocumentParameters.displaymode = (math_env != '')
math_code = math2html.math2html(math_code)
elif self.math_output == 'mathml':
self.doctype = self.doctype_mathml
self.content_type = self.content_type_mathml
try:
mathml_tree = parse_latex_math(math_code, inline=not(math_env))
math_code = ''.join(mathml_tree.xml())
except SyntaxError as err:
err_node = self.document.reporter.error(err, base_node=node)
self.visit_system_message(err_node)
self.body.append(self.starttag(node, 'p'))
self.body.append(','.join(err.args))
self.body.append('</p>\n')
self.body.append(self.starttag(node, 'pre',
CLASS='literal-block'))
self.body.append(self.encode(math_code))
self.body.append('\n</pre>\n')
self.depart_system_message(err_node)
raise nodes.SkipNode
# append to document body
if tag:
self.body.append(self.starttag(node, tag,
suffix='\n'*bool(math_env),
CLASS=clsarg))
self.body.append(math_code)
if math_env:
self.body.append('\n')
if tag:
self.body.append('</%s>\n' % tag)
# Content already processed:
raise nodes.SkipNode
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
# print node.astext().encode('utf8')
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_meta(self, node):
meta = self.emptytag(node, 'meta', **node.non_default_attributes())
self.add_meta(meta)
def depart_meta(self, node):
pass
def add_meta(self, tag):
self.meta.append(tag)
self.head.append(tag)
def visit_option(self, node):
if self.context[-1]:
self.body.append(', ')
self.body.append(self.starttag(node, 'span', '', CLASS='option'))
def depart_option(self, node):
self.body.append('</span>')
self.context[-1] += 1
def visit_option_argument(self, node):
self.body.append(node.get('delimiter', ' '))
self.body.append(self.starttag(node, 'var', ''))
def depart_option_argument(self, node):
self.body.append('</var>')
def visit_option_group(self, node):
atts = {}
if ( self.settings.option_limit
and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(
self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
def depart_option_group(self, node):
self.context.pop()
self.body.append('</kbd></td>\n')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append(
self.starttag(node, 'table', CLASS='docutils option-list',
frame="void", rules="none"))
self.body.append('<col class="option" />\n'
'<col class="description" />\n'
'<tbody valign="top">\n')
def depart_option_list(self, node):
self.body.append('</tbody>\n</table>\n')
def visit_option_list_item(self, node):
self.body.append(self.starttag(node, 'tr', ''))
def depart_option_list_item(self, node):
self.body.append('</tr>\n')
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item()
def should_be_compact_paragraph(self, node):
"""
Determine if the <p> tags around paragraph ``node`` can be omitted.
"""
if (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.compound)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
if (node.is_not_default(key) and
not (key == 'classes' and value in
([], ['first'], ['last'], ['first', 'last']))):
# Attribute which needs to survive.
return False
first = isinstance(node.parent[0], nodes.label) # skip label
for child in node.parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([n for n in node.parent if not isinstance(
n, (nodes.Invisible, nodes.label))])
if ( self.compact_simple
or self.compact_field_list
or self.compact_p and parent_length == 1):
return True
return False
def visit_paragraph(self, node):
if self.should_be_compact_paragraph(node):
self.context.append('')
else:
self.body.append(self.starttag(node, 'p', ''))
self.context.append('</p>\n')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_problematic(self, node):
if node.hasattr('refid'):
self.body.append('<a href="#%s">' % node['refid'])
self.context.append('</a>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'span', '', CLASS='problematic'))
def depart_problematic(self, node):
self.body.append('</span>')
self.body.append(self.context.pop())
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = isinstance(node.parent, nodes.TextElement) and 'span' or 'div'
if node['classes']:
self.body.append(self.starttag(node, t, suffix=''))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
atts = {'class': 'reference'}
if 'refuri' in node:
atts['href'] = node['refuri']
if ( self.settings.cloak_email_addresses
and atts['href'].startswith('mailto:')):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = True
atts['class'] += ' external'
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
atts['class'] += ' internal'
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
self.body.append(self.starttag(node, 'a', '', **atts))
def depart_reference(self, node):
self.body.append('</a>')
if not isinstance(node.parent, nodes.TextElement):
self.body.append('\n')
self.in_mailto = False
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision', meta=False)
def depart_revision(self, node):
self.depart_docinfo_item()
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr', ''))
node.column = 0
def depart_row(self, node):
self.body.append('</tr>\n')
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='rubric'))
def depart_rubric(self, node):
self.body.append('</p>\n')
def visit_section(self, node):
self.section_level += 1
self.body.append(
self.starttag(node, 'div', CLASS='section'))
def depart_section(self, node):
self.section_level -= 1
self.body.append('</div>\n')
def visit_sidebar(self, node):
self.body.append(
self.starttag(node, 'div', CLASS='sidebar'))
self.set_first_last(node)
self.in_sidebar = True
def depart_sidebar(self, node):
self.body.append('</div>\n')
self.in_sidebar = False
def visit_status(self, node):
self.visit_docinfo_item(node, 'status', meta=False)
def depart_status(self, node):
self.depart_docinfo_item()
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong', ''))
def depart_strong(self, node):
self.body.append('</strong>')
def visit_subscript(self, node):
self.body.append(self.starttag(node, 'sub', ''))
def depart_subscript(self, node):
self.body.append('</sub>')
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
self.in_document_title = len(self.body)
elif isinstance(node.parent, nodes.section):
tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
self.body.append(
self.starttag(node, tag, '', CLASS='section-subtitle') +
self.starttag({}, 'span', '', CLASS='section-subtitle'))
self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.subtitle = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_subtitle.extend(self.body)
del self.body[:]
def visit_superscript(self, node):
self.body.append(self.starttag(node, 'sup', ''))
def depart_superscript(self, node):
self.body.append('</sup>')
def visit_system_message(self, node):
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
backref_text = ''
if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
% backrefs[0])
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a href="#%s">%s</a>' % (backref, i))
i += 1
backref_text = ('; <em>backlinks: %s</em>'
% ', '.join(backlinks))
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s '
'(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (node['type'], node['level'],
self.encode(node['source']), line, backref_text))
def depart_system_message(self, node):
self.body.append('</div>\n')
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join(['docutils', self.settings.table_style]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes, border="1"))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_target(self, node):
if not ('refuri' in node or 'refid' in node
or 'refname' in node):
self.body.append(self.starttag(node, 'span', '', CLASS='target'))
self.context.append('</span>')
else:
self.context.append('')
def depart_target(self, node):
self.body.append(self.context.pop())
def visit_tbody(self, node):
self.write_colspecs()
self.body.append(self.context.pop()) # '</colgroup>\n' or ''
self.body.append(self.starttag(node, 'tbody', valign='top'))
def depart_tbody(self, node):
self.body.append('</tbody>\n')
def visit_term(self, node):
self.body.append(self.starttag(node, 'dt', ''))
def depart_term(self, node):
"""
Leave the end tag to `self.visit_definition()`, in case there's a
classifier.
"""
pass
def visit_tgroup(self, node):
# Mozilla needs <colgroup>:
self.body.append(self.starttag(node, 'colgroup'))
# Appended by thead or tbody:
self.context.append('</colgroup>\n')
node.stubs = []
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self.write_colspecs()
self.body.append(self.context.pop()) # '</colgroup>\n'
# There may or may not be a <thead>; this is for <tbody> to use:
self.context.append('')
self.body.append(self.starttag(node, 'thead', valign='bottom'))
def depart_thead(self, node):
self.body.append('</thead>\n')
def visit_title(self, node):
"""Only 6 section levels are supported by HTML."""
check_id = 0 # TODO: is this a bool (False) or a counter?
close_tag = '</p>\n'
if isinstance(node.parent, nodes.topic):
self.body.append(
self.starttag(node, 'p', '', CLASS='topic-title first'))
elif isinstance(node.parent, nodes.sidebar):
self.body.append(
self.starttag(node, 'p', '', CLASS='sidebar-title'))
elif isinstance(node.parent, nodes.Admonition):
self.body.append(
self.starttag(node, 'p', '', CLASS='admonition-title'))
elif isinstance(node.parent, nodes.table):
self.body.append(
self.starttag(node, 'caption', ''))
close_tag = '</caption>\n'
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
close_tag = '</h1>\n'
self.in_document_title = len(self.body)
else:
assert isinstance(node.parent, nodes.section)
h_level = self.section_level + self.initial_header_level - 1
atts = {}
if (len(node.parent) >= 2 and
isinstance(node.parent[1], nodes.subtitle)):
atts['CLASS'] = 'with-subtitle'
self.body.append(
self.starttag(node, 'h%s' % h_level, '', **atts))
atts = {}
if node.hasattr('refid'):
atts['class'] = 'toc-backref'
atts['href'] = '#' + node['refid']
if atts:
self.body.append(self.starttag({}, 'a', '', **atts))
close_tag = '</a></h%s>\n' % (h_level)
else:
close_tag = '</h%s>\n' % (h_level)
self.context.append(close_tag)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.title = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_title.extend(self.body)
del self.body[:]
def visit_title_reference(self, node):
self.body.append(self.starttag(node, 'cite', ''))
def depart_title_reference(self, node):
self.body.append('</cite>')
def visit_topic(self, node):
self.body.append(self.starttag(node, 'div', CLASS='topic'))
self.topic_classes = node['classes']
def depart_topic(self, node):
self.body.append('</div>\n')
self.topic_classes = []
def visit_transition(self, node):
self.body.append(self.emptytag(node, 'hr', CLASS='docutils'))
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version', meta=False)
def depart_version(self, node):
self.depart_docinfo_item()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
class SimpleListChecker(nodes.GenericNodeVisitor):
"""
Raise `nodes.NodeFound` if non-simple list item is encountered.
Here "simple" means a list item containing nothing other than a single
paragraph, a simple list, or a paragraph followed by a simple list.
"""
def default_visit(self, node):
raise nodes.NodeFound
def visit_bullet_list(self, node):
pass
def visit_enumerated_list(self, node):
pass
def visit_list_item(self, node):
children = []
for child in node.children:
if not isinstance(child, nodes.Invisible):
children.append(child)
if (children and isinstance(children[0], nodes.paragraph)
and (isinstance(children[-1], nodes.bullet_list)
or isinstance(children[-1], nodes.enumerated_list))):
children.pop()
if len(children) <= 1:
return
else:
raise nodes.NodeFound
def visit_paragraph(self, node):
raise nodes.SkipNode
def invisible_visit(self, node):
"""Invisible nodes should be ignored."""
raise nodes.SkipNode
visit_comment = invisible_visit
visit_substitution_definition = invisible_visit
visit_target = invisible_visit
visit_pending = invisible_visit
|
the-stack_0_7458 |
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('project_dashboard')
class CeleryConfig(AppConfig):
name = 'project_dashboard.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['dsn'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}') # pragma: no cover
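# Hedged usage sketch, not part of the original module: with the Django
# settings configured and a worker running (e.g.
# `celery -A project_dashboard.taskapp worker`), the debug task can be queued.
if __name__ == '__main__':
    async_result = debug_task.delay()
    print(f'queued debug_task with id {async_result.id}')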
|
the-stack_0_7459 | from __future__ import unicode_literals
import importlib
import json
import logging
import random
import re
import string
import urlparse
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
import urllib3
logger = logging.getLogger(__name__)
content_types = {
'image': [
'website.content_types.Imgur',
'website.content_types.Image'
],
'video': [
'website.content_types.YouTube',
'website.content_types.Video'
],
'link': [
'website.content_types.Link'
],
'gifv': [
'website.content_types.GifV'
]
}
def get_class_from_string(class_name):
module_name, cls_name = class_name.rsplit(".", 1)
module = importlib.import_module(module_name)
return getattr(module, cls_name)
def detect_post_type(url=None):
# Default
if not url:
return 'text'
else:
return detect_link_type(url)
def find_urls(text):
"""Find any URLs in the given text, return a list of them"""
    return re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
                      r'(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
def imgur_preprocessor(url):
url_data = urlparse.urlparse(url)
    logger.info('imgur preprocessor')
logger.info((url_data.netloc == 'imgur.com' or
url_data.netloc == 'i.imgur.com'))
logger.info(url[-4:] == '.gif')
if ((url_data.netloc == 'imgur.com' or
url_data.netloc == 'i.imgur.com') and url[-4:] == '.gif'):
# Switch to gifv
url += 'v'
logger.info('new url {}'.format(url))
return url
def detect_link_type(url):
"""Given a link, get the HEAD and match to known types"""
logger.info("Detecting content type of {}".format(url))
# Get mime type of remote url
try:
http = urllib3.PoolManager()
response = http.request('HEAD', url)
content_type = response.headers.get('content-type')
except Exception as e:
logger.warning("Could not detect content type. Defaulting to "
"link for url: {}, exception: {}".format(url, e))
return 'link'
# Find list of content detectors based on mime
if ('text/html' in content_type and
'imgur.com' in url and
url[-5:] == '.gifv'):
key = 'gifv'
elif content_type in settings.MIME_IMAGES:
key = 'image'
elif content_type in settings.MIME_VIDEO or 'youtube.com' in url:
key = 'video'
elif url:
key = 'link'
else:
return 'text'
logger.info('content type is {}'.format(key))
# Go through content detectors in order, returning if any matches
for content_type in content_types[key]:
cls = get_class_from_string(content_type)()
detected_type = cls.detect(url, content_type)
if detected_type:
return detected_type
def url_filter(text):
"""Given a block of text, add HTML for links and embedded content."""
attachment_type = None
attachment_url = None
# Search for images to render
urls = find_urls(text)
# Render the first
logger.debug('Looking for image links in message {}'.format(
text))
if urls and detect_link_type(urls[0]) == 'image':
logger.info('found image link in message: {}'.format(urls[0]))
attachment_type = 'image'
attachment_url = urls[0]
return {
'message': text,
'attachment_type': attachment_type,
'attachment_url': attachment_url
}
def render_to_json(request, data):
# msgs = {}
# messages_list = messages.get_messages(request)
# count = 0
# for message in messages_list:
# msgs[count] = {'message': message.message, 'level': message.level}
# count += 1
# data['messages'] = msgs
return HttpResponse(
json.dumps(data, ensure_ascii=False, cls=DjangoJSONEncoder),
content_type=request.is_ajax() and "application/json" or "text/html"
)
def generate_pk(length=32):
return ''.join(random.SystemRandom().choice(
'abcdef' + string.digits) for _ in range(length))
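# Hedged usage sketch with an illustrative URL, not taken from the original
# module. detect_link_type()/url_filter() issue a network HEAD request, so
# only the purely local helpers are exercised here.
if __name__ == '__main__':
    sample = 'check this out http://i.imgur.com/example.gif please'
    print(find_urls(sample))            # -> ['http://i.imgur.com/example.gif']
    print(imgur_preprocessor('http://i.imgur.com/example.gif'))  # .gif -> .gifv
    print(generate_pk(8))               # random 8-character id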
|
the-stack_0_7460 | from arm.logicnode.arm_nodes import *
class ExpressionNode(ArmLogicTreeNode):
"""Evaluate a Haxe expression and get its output.
@output Result: the result of the expression."""
bl_idname = 'LNExpressionNode'
bl_label = 'Expression'
arm_version = 1
property0: StringProperty(name='', default='')
def init(self, context):
super(ExpressionNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_output('ArmNodeSocketAction', 'Out')
self.add_output('NodeSocketShader', 'Result')
def draw_buttons(self, context, layout):
layout.prop(self, 'property0')
add_node(ExpressionNode, category=PKG_AS_CATEGORY, section='haxe')
|
the-stack_0_7462 | import sys
from sys import argv
from subprocess import call
import threading
import webbrowser
import os
from shutil import copy, move, rmtree
from os.path import join, dirname, realpath, exists
from glob import glob
import re
from setuptools import setup, find_packages, Command
directory = dirname(realpath(__file__))
sys.path.insert(0, join(directory, 'escher'))
version = __import__('version').__version__
full_version = __import__('version').__full_version__
package = __import__('version').package
port = 8789
setup(
name='Escher',
version=full_version,
author=package['author'],
url=package['homepage'],
description=package['description'],
keywords=', '.join(package['keywords']),
license=package['license'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent'
],
packages=find_packages(),
include_package_data=True,
data_files=[
(
'share/jupyter/nbextensions/escher',
[
'escher/static/extension.js',
'escher/static/escher.min.js',
'escher/static/escher.min.js.map',
]
),
(
'etc/jupyter/nbconfig/notebook.d',
['escher.json'],
)
],
install_requires=[
'Jinja2>=2.7.3,<3',
'pytest>=4.0.1,<5',
'cobra>=0.5.0',
'jsonschema>=2.4.0,<3',
'ipywidgets>=7.1.0,<8',
'pandas>=0.18'
],
extras_require={
'docs': ['sphinx>=1.2', 'sphinx-rtd-theme>=0.1.6'],
},
)
|
the-stack_0_7463 | import re
import sys
from django import VERSION
from django.conf import settings as django_settings
from django.contrib import admin
from django.db import connection
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views import debug
from celery import current_app
from redis import ConnectionError
from kitsune.sumo.redis_utils import redis_client
def settings(request):
"""Admin view that displays the django settings."""
settings = debug.get_safe_settings()
sorted_settings = [{"key": key, "value": settings[key]} for key in sorted(settings.keys())]
return render_to_response(
"kadmin/settings.html",
{"pythonpath": sys.path, "settings": sorted_settings, "title": "Settings"},
RequestContext(request, {}),
)
admin.site.register_view("settings", view=settings, name="Settings")
def celery_settings(request):
"""Admin view that displays the celery configuration."""
capital = re.compile("^[A-Z]")
settings = [key for key in dir(current_app.conf) if capital.match(key)]
sorted_settings = [
{
"key": key,
"value": "*****" if "password" in key.lower() else getattr(current_app.conf, key),
}
for key in sorted(settings)
]
return render_to_response(
"kadmin/settings.html",
{"settings": sorted_settings, "title": "Celery Settings"},
RequestContext(request, {}),
)
admin.site.register_view("celery", view=celery_settings, name="Celery Settings")
def env(request):
"""Admin view that displays env info."""
return render_to_response(
"kadmin/env_view.html",
{"request": request, "pythonver": sys.version, "djangover": VERSION},
)
admin.site.register_view("env", view=env, name="Environment")
def schema_version(request):
"""Admin view that displays the current schema_version."""
cursor = connection.cursor()
cursor.execute("SELECT version FROM schema_version")
version = [x for x in cursor][0][0]
return render_to_response(
"kadmin/schema.html",
{"schema_version": version, "title": "Schema Version"},
RequestContext(request, {}),
)
admin.site.register_view("schema", view=schema_version, name="Database Schema Version")
def redis_info(request):
"""Admin view that displays redis INFO+CONFIG output for all backends."""
redis_info = {}
for key in list(django_settings.REDIS_BACKENDS.keys()):
redis_info[key] = {}
client = redis_client(key)
redis_info[key]["connection"] = django_settings.REDIS_BACKENDS[key]
try:
cfg = client.config_get()
redis_info[key]["config"] = [{"key": k, "value": cfg[k]} for k in sorted(cfg)]
info = client.info()
redis_info[key]["info"] = [{"key": k, "value": info[k]} for k in sorted(info)]
except ConnectionError:
redis_info[key]["down"] = True
return render_to_response(
"kadmin/redis.html",
{"redis_info": redis_info, "title": "Redis Information"},
RequestContext(request, {}),
)
admin.site.register_view("redis", view=redis_info, name="Redis Information")
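# Hedged illustration, not taken from kitsune's settings files: redis_info()
# above expects settings.REDIS_BACKENDS to map backend names to connection
# URIs, roughly like the example below (the exact URI format is an assumption).
EXAMPLE_REDIS_BACKENDS = {
    'default': 'redis://localhost:6379?socket_timeout=0.5&db=0',
}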
|
the-stack_0_7464 | # ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import os
def run_test(env, xml_file, test):
"""
Run test with the given SCons Environment, dumping Valgrind
results to the given XML file. If no Valgrind run is desired
simply pass in an empty string or None for the xml_file
parameter.
Note that the test path should not include the build directory
where binaries are placed. The build directory will be prepended
to the test path automatically.
"""
build_dir = env.get('BUILD_DIR')
result_dir = os.path.join(build_dir, 'test_out/')
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
# Dump test report in XML format to the results directory.
env.AppendENVPath('GTEST_OUTPUT', ['xml:' + result_dir])
# Make sure the Google Test libraries are in the dynamic
# linker/loader path.
env.AppendENVPath('LD_LIBRARY_PATH', [build_dir])
env.AppendENVPath('LD_LIBRARY_PATH', ['./extlibs/gtest/gtest-1.7.0/lib/.libs'])
test_cmd = os.path.join(build_dir, test)
have_valgrind = False
if env.get('TARGET_OS') not in ['windows']:
have_valgrind = True
if xml_file and have_valgrind:
# Environment variables to be made available during the
# Valgrind run.
valgrind_environment = ''
# GLib uses a custom memory allocation scheme that can
# sometimes confuse Valgrind. Configure GLib to be Valgrind
# friendly.
valgrind_environment += 'G_DEBUG=gc-friendly G_SLICE=always-malloc'
# Valgrind suppressions file.
suppression_file = env.File('#tools/valgrind/iotivity.supp').srcnode().path
# Set up to run the test under Valgrind.
test_cmd = '%s valgrind --leak-check=full --suppressions=%s --xml=yes --xml-file=%s %s' % (valgrind_environment, suppression_file, xml_file, test_cmd)
ut = env.Command('ut', None, test_cmd)
env.AlwaysBuild('ut')
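# Hedged illustration only: how a SConscript might invoke run_test(). The
# XML file name and test target below are hypothetical, not taken from the
# IoTivity build scripts.
def example_sconscript_usage(env):
    run_test(env,
             'resource_unittests.memcheck.xml',
             'resource/unittests/unittests')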
|
the-stack_0_7466 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
BASE_DIR = os.path.dirname(__file__)
VERSION_FILENAME = os.path.join(
BASE_DIR, "src", "opentelemetry", "test", "version.py"
)
PACKAGE_INFO = {}
with open(VERSION_FILENAME) as f:
exec(f.read(), PACKAGE_INFO)
setuptools.setup(version=PACKAGE_INFO["__version__"])
|
the-stack_0_7467 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.teachers import FixedDialogTeacher, DialogTeacher, ParlAIDialogTeacher
from parlai.tasks.wrapper.agents import AbstractWrapperTeacher
from parlai.utils.io import PathManager
from .build import build
def get_sentence_tokenizer():
"""
Loads the nltk sentence tokenizer.
"""
try:
import nltk
except ImportError:
raise ImportError('Please install nltk (e.g. pip install nltk).')
# nltk-specific setup
st_path = 'tokenizers/punkt/{0}.pickle'.format('english')
try:
sent_tok = nltk.data.load(st_path)
except LookupError:
nltk.download('punkt')
sent_tok = nltk.data.load(st_path)
return sent_tok
class IndexTeacher(FixedDialogTeacher):
"""
Hand-written SQuAD teacher, which loads the json squad data and implements its own
`act()` method for interacting with student agent, rather than inheriting from the
core Dialog Teacher. This code is here as an example of rolling your own without
inheritance.
This teacher also provides access to the "answer_start" indices that specify the
location of the answer in the context.
"""
def __init__(self, opt, shared=None):
build(opt)
super().__init__(opt, shared)
if self.datatype.startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
datapath = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.data = self._setup_data(datapath)
self.id = 'squad'
self.reset()
def num_examples(self):
return len(self.examples)
def num_episodes(self):
return self.num_examples()
def get(self, episode_idx, entry_idx=None):
article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
article = self.squad[article_idx]
paragraph = article['paragraphs'][paragraph_idx]
qa = paragraph['qas'][qa_idx]
question = qa['question']
answers = []
answer_starts = []
for a in qa['answers']:
answers.append(a['text'])
answer_starts.append(a['answer_start'])
context = paragraph['context']
action = {
'id': 'squad',
'text': context + '\n' + question,
'labels': answers,
'episode_done': True,
'answer_starts': answer_starts,
}
return action
def _setup_data(self, path):
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
self.examples = []
for article_idx in range(len(self.squad)):
article = self.squad[article_idx]
for paragraph_idx in range(len(article['paragraphs'])):
paragraph = article['paragraphs'][paragraph_idx]
num_questions = len(paragraph['qas'])
for qa_idx in range(num_questions):
self.examples.append((article_idx, paragraph_idx, qa_idx))
class DefaultTeacher(DialogTeacher):
"""
This version of SQuAD inherits from the core Dialog Teacher, which just requires it
to define an iterator over its data `setup_data` in order to inherit basic metrics,
a default `act` function.
For SQuAD, this does not efficiently store the paragraphs in memory.
"""
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.id = 'squad'
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = tuple(a['text'] for a in qa['answers'])
context = paragraph['context']
yield (context + '\n' + question, answers), True
class OpensquadTeacher(DialogTeacher):
"""
This version of SQuAD inherits from the core Dialog Teacher, which just requires it
to define an iterator over its data `setup_data` in order to inherit basic metrics,
a default `act` function.
Note: This teacher omits the context paragraph
"""
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.id = 'squad'
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = (a['text'] for a in qa['answers'])
yield (question, answers), True
class TitleTeacher(DefaultTeacher):
"""
    This version of SQuAD inherits from the Default Teacher. The only
    difference is that the 'text' field of an observation will contain
the title of the article separated by a newline from the paragraph and the
query.
Note: The title will contain underscores, as it is the part of the link for
the Wikipedia page; i.e., the article is at the site:
https://en.wikipedia.org/wiki/{TITLE}
Depending on your task, you may wish to remove underscores.
"""
def __init__(self, opt, shared=None):
self.id = 'squad_title'
build(opt)
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
title = article['title']
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = (a['text'] for a in qa['answers'])
context = paragraph['context']
yield ('\n'.join([title, context, question]), answers), True
class FulldocTeacher(ParlAIDialogTeacher):
def __init__(self, opt, shared=None):
build(opt)
opt = copy.deepcopy(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'valid'
datafile = os.path.join(
opt['datapath'], 'SQuAD-fulldoc', "squad_fulldocs." + suffix + ":ordered"
)
opt['parlaidialogteacher_datafile'] = datafile
super().__init__(opt, shared)
self.id = 'squad-fulldoc'
self.reset()
class SentenceTeacher(IndexTeacher):
"""
Teacher where the label(s) are the sentences that contain the true answer.
Some punctuation may be removed from the context and the answer for
tokenization purposes.
If `include_context` is False, the teacher returns action dict in the
following format:
{
'context': <context>,
'text': <question>,
'labels': <sentences containing the true answer>,
'label_candidates': <all sentences in the context>,
'episode_done': True,
'answer_starts': <index of start of answer in context>
}
Otherwise, the 'text' field contains <context>\n<question> and there is
no separate context field.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.sent_tok = get_sentence_tokenizer()
self.include_context = opt.get('include_context', False)
@staticmethod
def add_cmdline_args(argparser):
agent = argparser.add_argument_group('SQuAD Sentence Teacher Arguments')
agent.add_argument(
'--include-context',
type='bool',
default=False,
help='include context within text instead of as a ' 'separate field',
)
def get(self, episode_idx, entry_idx=None):
article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
article = self.squad[article_idx]
paragraph = article['paragraphs'][paragraph_idx]
qa = paragraph['qas'][qa_idx]
context = paragraph['context']
question = qa['question']
answers = [a['text'] for a in qa['answers']]
# remove '.', '?', '!' from answers for proper sentence
# tokenization
edited_answers = []
for answer in answers:
new_answer = answer.replace('.', '').replace('?', '').replace('!', '')
context = context.replace(answer, new_answer)
edited_answers.append(new_answer)
edited_sentences = self.sent_tok.tokenize(context)
labels = []
label_starts = []
for sentence in edited_sentences:
for answer in edited_answers:
if answer in sentence and sentence not in labels:
labels.append(sentence)
label_starts.append(context.index(sentence))
break
action = {
'context': context,
'text': question,
'labels': labels,
'label_candidates': edited_sentences,
'episode_done': True,
'answer_starts': label_starts,
}
if self.include_context:
action['text'] = action['context'] + '\n' + action['text']
del action['context']
return action
class FulldocsentenceTeacher(FulldocTeacher):
"""
Teacher which contains the question as the text, the sentences as the label
candidates, and the label as the sentence containing the answer.
Some punctuation may be removed for tokenization purposes.
If `include_context` is False, the teacher returns action dict in the
following format:
{
'context': <context>,
'text': <question>,
'labels': <sentences containing the true answer>,
'label_candidates': <all sentences in the context>,
'episode_done': True,
'answer_starts': <index of start of answer in context>
}
Otherwise, the 'text' field contains <context>\n<question> and there is
no separate context field.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.sent_tok = get_sentence_tokenizer()
self.include_context = opt.get('include_context', False)
@staticmethod
def add_cmdline_args(argparser):
agent = argparser.add_argument_group('SQuAD Fulldoc Sentence Teacher Arguments')
agent.add_argument(
'--include-context',
type='bool',
default=False,
help='include context within text instead of as a ' 'separate field',
)
def get(self, episode_idx, entry_idx=None):
action = {}
episode = self.episodes[episode_idx][entry_idx]
context = ' '.join(episode['text'].split('\n')[:-1]).replace(
'\xa0', ' '
) # get rid of non breaking space characters
question = episode['text'].split('\n')[-1]
label_field = 'labels' if 'labels' in episode else 'eval_labels'
answers = []
for answer in episode[label_field]:
new_answer = answer.replace('.', '').replace('?', '').replace('!', '')
context = context.replace(answer, new_answer)
answers.append(new_answer)
sentences = self.sent_tok.tokenize(context)
labels = []
label_starts = []
for sentence in sentences:
for answer in answers:
if answer in sentence and sentence not in labels:
labels.append(sentence)
label_starts.append(context.index(sentence))
action = {
'context': context,
'text': question,
label_field: labels,
'answer_starts': label_starts,
'label_candidates': sentences,
'episode_done': episode['episode_done'],
}
if self.include_context:
action['text'] = action['context'] + '\n' + action['text']
del action['context']
return action
class SquadQATeacher(AbstractWrapperTeacher):
"""
Wrapper Teacher over SQuAD to get only the passage, and ignore the question.
"""
@classmethod
def add_cmdline_args(cls, parser):
parser.set_defaults(wrapper_task='squad')
def __init__(self, opt: Opt, shared=None):
super().__init__(opt, shared)
def _edit_action(self, act: Message) -> Message:
"""
        SQuAD returns both the passage and the question; only the passage is
        required for this task.
"""
passage = act['text'].split('\n')[0]
act.force_set('text', passage)
return act
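# Hedged usage sketch, not part of this module: these teachers are normally
# exercised through ParlAI's standard scripts, e.g.
# `parlai display_data --task squad` on the command line. The flags used
# below follow ParlAI's usual conventions and are assumptions here.
if __name__ == '__main__':
    from parlai.scripts.display_data import DisplayData
    DisplayData.main(task='squad', num_examples=3)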
|
the-stack_0_7468 | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from botocore.vendored import requests
from botocore.vendored.requests.packages import urllib3
def _exception_from_packed_args(exception_cls, args=None, kwargs=None):
# This is helpful for reducing Exceptions that only accept kwargs as
# only positional arguments can be provided for __reduce__
# Ideally, this would also be a class method on the BotoCoreError
# but instance methods cannot be pickled.
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return exception_cls(*args, **kwargs)
class BotoCoreError(Exception):
"""
The base exception class for BotoCore exceptions.
:ivar msg: The descriptive message associated with the error.
"""
fmt = 'An unspecified error occurred'
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
def __reduce__(self):
return _exception_from_packed_args, (self.__class__, None, self.kwargs)
class DataNotFoundError(BotoCoreError):
"""
The data associated with a particular path could not be loaded.
:ivar data_path: The data path that the user attempted to load.
"""
fmt = 'Unable to load data for: {data_path}'
class UnknownServiceError(DataNotFoundError):
"""Raised when trying to load data for an unknown service.
:ivar service_name: The name of the unknown service.
"""
fmt = (
"Unknown service: '{service_name}'. Valid service names are: "
"{known_service_names}")
class ApiVersionNotFoundError(BotoCoreError):
"""
The data associated with either the API version or a compatible one
could not be loaded.
:ivar data_path: The data path that the user attempted to load.
:ivar api_version: The API version that the user attempted to load.
"""
fmt = 'Unable to load data {data_path} for: {api_version}'
class HTTPClientError(BotoCoreError):
fmt = 'An HTTP Client raised an unhandled exception: {error}'
def __init__(self, request=None, response=None, **kwargs):
self.request = request
self.response = response
super(HTTPClientError, self).__init__(**kwargs)
def __reduce__(self):
return _exception_from_packed_args, (
self.__class__, (self.request, self.response), self.kwargs)
class ConnectionError(BotoCoreError):
fmt = 'An HTTP Client failed to establish a connection: {error}'
class InvalidIMDSEndpointError(BotoCoreError):
fmt = 'Invalid endpoint EC2 Instance Metadata endpoint: {endpoint}'
class EndpointConnectionError(ConnectionError):
fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"'
class SSLError(ConnectionError, requests.exceptions.SSLError):
fmt = 'SSL validation failed for {endpoint_url} {error}'
class ConnectionClosedError(HTTPClientError):
fmt = (
'Connection was closed before we received a valid response '
'from endpoint URL: "{endpoint_url}".')
class ReadTimeoutError(HTTPClientError, requests.exceptions.ReadTimeout,
urllib3.exceptions.ReadTimeoutError):
fmt = 'Read timeout on endpoint URL: "{endpoint_url}"'
class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout):
fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"'
class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError):
fmt = 'Failed to connect to proxy URL: "{proxy_url}"'
class NoCredentialsError(BotoCoreError):
"""
No credentials could be found.
"""
fmt = 'Unable to locate credentials'
class PartialCredentialsError(BotoCoreError):
"""
Only partial credentials were found.
:ivar cred_var: The missing credential variable name.
"""
fmt = 'Partial credentials found in {provider}, missing: {cred_var}'
class CredentialRetrievalError(BotoCoreError):
"""
Error attempting to retrieve credentials from a remote source.
:ivar provider: The name of the credential provider.
:ivar error_msg: The msg explaining why credentials could not be
retrieved.
"""
fmt = 'Error when retrieving credentials from {provider}: {error_msg}'
class UnknownSignatureVersionError(BotoCoreError):
"""
Requested Signature Version is not known.
:ivar signature_version: The name of the requested signature version.
"""
fmt = 'Unknown Signature Version: {signature_version}.'
class ServiceNotInRegionError(BotoCoreError):
"""
The service is not available in requested region.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = 'Service {service_name} not available in region {region_name}'
class BaseEndpointResolverError(BotoCoreError):
"""Base error for endpoint resolving errors.
Should never be raised directly, but clients can catch
this exception if they want to generically handle any errors
during the endpoint resolution process.
"""
class NoRegionError(BaseEndpointResolverError):
"""No region was specified."""
fmt = 'You must specify a region.'
class UnknownEndpointError(BaseEndpointResolverError, ValueError):
"""
Could not construct an endpoint.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = (
'Unable to construct an endpoint for '
'{service_name} in region {region_name}')
class UnknownFIPSEndpointError(BaseEndpointResolverError):
"""
Could not construct a FIPS endpoint.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = (
'The provided FIPS pseudo-region "{region_name}" is not known for '
'the service "{service_name}". A FIPS compliant endpoint cannot be '
'constructed.'
)
class ProfileNotFound(BotoCoreError):
"""
The specified configuration profile was not found in the
configuration file.
:ivar profile: The name of the profile the user attempted to load.
"""
fmt = 'The config profile ({profile}) could not be found'
class ConfigParseError(BotoCoreError):
"""
The configuration file could not be parsed.
:ivar path: The path to the configuration file.
"""
fmt = 'Unable to parse config file: {path}'
class ConfigNotFound(BotoCoreError):
"""
The specified configuration file could not be found.
:ivar path: The path to the configuration file.
"""
fmt = 'The specified config file ({path}) could not be found.'
class MissingParametersError(BotoCoreError):
"""
One or more required parameters were not supplied.
:ivar object: The object that has missing parameters.
This can be an operation or a parameter (in the
case of inner params). The str() of this object
will be used so it doesn't need to implement anything
other than str().
:ivar missing: The names of the missing parameters.
"""
fmt = ('The following required parameters are missing for '
'{object_name}: {missing}')
class ValidationError(BotoCoreError):
"""
An exception occurred validating parameters.
Subclasses must accept a ``value`` and ``param``
argument in their ``__init__``.
:ivar value: The value that was being validated.
:ivar param: The parameter that failed validation.
:ivar type_name: The name of the underlying type.
"""
fmt = ("Invalid value ('{value}') for param {param} "
"of type {type_name} ")
class ParamValidationError(BotoCoreError):
fmt = 'Parameter validation failed:\n{report}'
# These exceptions subclass from ValidationError so that code
# can just 'except ValidationError' to catch any possible validation
# error.
class UnknownKeyError(ValidationError):
"""
Unknown key in a struct parameter.
:ivar value: The value that was being checked.
:ivar param: The name of the parameter.
:ivar choices: The valid choices the value can be.
"""
fmt = ("Unknown key '{value}' for param '{param}'. Must be one "
"of: {choices}")
class RangeError(ValidationError):
"""
A parameter value was out of the valid range.
:ivar value: The value that was being checked.
:ivar param: The parameter that failed validation.
:ivar min_value: The specified minimum value.
:ivar max_value: The specified maximum value.
"""
fmt = ('Value out of range for param {param}: '
'{min_value} <= {value} <= {max_value}')
class UnknownParameterError(ValidationError):
"""
Unknown top level parameter.
:ivar name: The name of the unknown parameter.
:ivar operation: The name of the operation.
:ivar choices: The valid choices the parameter name can be.
"""
fmt = (
"Unknown parameter '{name}' for operation {operation}. Must be one "
"of: {choices}"
)
class InvalidRegionError(ValidationError, ValueError):
"""
Invalid region_name provided to client or resource.
:ivar region_name: region_name that was being validated.
"""
fmt = (
"Provided region_name '{region_name}' doesn't match a supported format."
)
class AliasConflictParameterError(ValidationError):
"""
Error when an alias is provided for a parameter as well as the original.
:ivar original: The name of the original parameter.
:ivar alias: The name of the alias
:ivar operation: The name of the operation.
"""
fmt = (
"Parameter '{original}' and its alias '{alias}' were provided "
"for operation {operation}. Only one of them may be used."
)
class UnknownServiceStyle(BotoCoreError):
"""
Unknown style of service invocation.
:ivar service_style: The style requested.
"""
fmt = 'The service style ({service_style}) is not understood.'
class PaginationError(BotoCoreError):
fmt = 'Error during pagination: {message}'
class OperationNotPageableError(BotoCoreError):
fmt = 'Operation cannot be paginated: {operation_name}'
class ChecksumError(BotoCoreError):
"""The expected checksum did not match the calculated checksum.
"""
fmt = ('Checksum {checksum_type} failed, expected checksum '
'{expected_checksum} did not match calculated checksum '
'{actual_checksum}.')
class UnseekableStreamError(BotoCoreError):
"""Need to seek a stream, but stream does not support seeking.
"""
fmt = ('Need to rewind the stream {stream_object}, but stream '
'is not seekable.')
class WaiterError(BotoCoreError):
"""Waiter failed to reach desired state."""
fmt = 'Waiter {name} failed: {reason}'
def __init__(self, name, reason, last_response):
super(WaiterError, self).__init__(name=name, reason=reason)
self.last_response = last_response
class IncompleteReadError(BotoCoreError):
"""HTTP response did not return expected number of bytes."""
fmt = ('{actual_bytes} read, but total bytes '
'expected is {expected_bytes}.')
class InvalidExpressionError(BotoCoreError):
"""Expression is either invalid or too complex."""
fmt = 'Invalid expression {expression}: Only dotted lookups are supported.'
class UnknownCredentialError(BotoCoreError):
"""Tried to insert before/after an unregistered credential type."""
fmt = 'Credential named {name} not found.'
class WaiterConfigError(BotoCoreError):
"""Error when processing waiter configuration."""
fmt = 'Error processing waiter config: {error_msg}'
class UnknownClientMethodError(BotoCoreError):
"""Error when trying to access a method on a client that does not exist."""
fmt = 'Client does not have method: {method_name}'
class UnsupportedSignatureVersionError(BotoCoreError):
"""Error when trying to use an unsupported Signature Version."""
fmt = 'Signature version is not supported: {signature_version}'
class ClientError(Exception):
MSG_TEMPLATE = (
'An error occurred ({error_code}) when calling the {operation_name} '
'operation{retry_info}: {error_message}')
def __init__(self, error_response, operation_name):
retry_info = self._get_retry_info(error_response)
error = error_response.get('Error', {})
msg = self.MSG_TEMPLATE.format(
error_code=error.get('Code', 'Unknown'),
error_message=error.get('Message', 'Unknown'),
operation_name=operation_name,
retry_info=retry_info,
)
super(ClientError, self).__init__(msg)
self.response = error_response
self.operation_name = operation_name
def _get_retry_info(self, response):
retry_info = ''
if 'ResponseMetadata' in response:
metadata = response['ResponseMetadata']
if metadata.get('MaxAttemptsReached', False):
if 'RetryAttempts' in metadata:
retry_info = (' (reached max retries: %s)' %
metadata['RetryAttempts'])
return retry_info
def __reduce__(self):
# Subclasses of ClientError's are dynamically generated and
# cannot be pickled unless they are attributes of a
# module. So at the very least return a ClientError back.
return ClientError, (self.response, self.operation_name)
class EventStreamError(ClientError):
pass
class UnsupportedTLSVersionWarning(Warning):
"""Warn when an openssl version that uses TLS 1.2 is required"""
pass
class ImminentRemovalWarning(Warning):
pass
class InvalidDNSNameError(BotoCoreError):
"""Error when virtual host path is forced on a non-DNS compatible bucket"""
fmt = (
'Bucket named {bucket_name} is not DNS compatible. Virtual '
'hosted-style addressing cannot be used. The addressing style '
'can be configured by removing the addressing_style value '
'or setting that value to \'path\' or \'auto\' in the AWS Config '
'file or in the botocore.client.Config object.'
)
class InvalidS3AddressingStyleError(BotoCoreError):
"""Error when an invalid path style is specified"""
fmt = (
'S3 addressing style {s3_addressing_style} is invalid. Valid options '
'are: \'auto\', \'virtual\', and \'path\''
)
class UnsupportedS3ArnError(BotoCoreError):
"""Error when S3 ARN provided to Bucket parameter is not supported"""
fmt = (
'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only '
'ARNs for S3 access-points are supported.'
)
class UnsupportedS3ControlArnError(BotoCoreError):
"""Error when S3 ARN provided to S3 control parameter is not supported"""
fmt = (
'S3 ARN "{arn}" provided is invalid for this operation. {msg}'
)
class InvalidHostLabelError(BotoCoreError):
"""Error when an invalid host label would be bound to an endpoint"""
fmt = (
'Invalid host label to be bound to the hostname of the endpoint: '
'"{label}".'
)
class UnsupportedOutpostResourceError(BotoCoreError):
"""Error when S3 Outpost ARN provided to Bucket parameter is incomplete"""
fmt = (
'S3 Outpost ARN resource "{resource_name}" provided to "Bucket" '
'parameter is invalid. Only ARNs for S3 Outpost arns with an '
'access-point sub-resource are supported.'
)
class UnsupportedS3ConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3: {msg}'
)
class UnsupportedS3AccesspointConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3 access-points: {msg}'
)
class InvalidEndpointDiscoveryConfigurationError(BotoCoreError):
"""Error when invalid value supplied for endpoint_discovery_enabled"""
fmt = (
'Unsupported configuration value for endpoint_discovery_enabled. '
'Expected one of ("true", "false", "auto") but got {config_value}.'
)
class UnsupportedS3ControlConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with S3 Control"""
fmt = (
'Unsupported configuration when using S3 Control: {msg}'
)
class InvalidRetryConfigurationError(BotoCoreError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Cannot provide retry configuration for "{retry_config_option}". '
'Valid retry configuration options are: \'max_attempts\''
)
class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Value provided to "max_attempts": {provided_max_attempts} must '
'be an integer greater than or equal to {min_value}.'
)
class InvalidRetryModeError(InvalidRetryConfigurationError):
"""Error when invalid retry mode configuration is specified"""
fmt = (
'Invalid value provided to "mode": "{provided_retry_mode}" must '
'be one of: "legacy", "standard", "adaptive"'
)
class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError):
"""Error for invalid s3 us-east-1 regional endpoints configuration"""
fmt = (
'S3 us-east-1 regional endpoint option '
'{s3_us_east_1_regional_endpoint_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class InvalidSTSRegionalEndpointsConfigError(BotoCoreError):
"""Error when invalid sts regional endpoints configuration is specified"""
fmt = (
'STS regional endpoints option {sts_regional_endpoints_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class StubResponseError(BotoCoreError):
fmt = 'Error getting response stub for operation {operation_name}: {reason}'
class StubAssertionError(StubResponseError, AssertionError):
pass
class UnStubbedResponseError(StubResponseError):
pass
class InvalidConfigError(BotoCoreError):
fmt = '{error_msg}'
class InfiniteLoopConfigError(InvalidConfigError):
fmt = (
'Infinite loop in credential configuration detected. Attempting to '
'load from profile {source_profile} which has already been visited. '
'Visited profiles: {visited_profiles}'
)
class RefreshWithMFAUnsupportedError(BotoCoreError):
fmt = 'Cannot refresh credentials: MFA token required.'
class MD5UnavailableError(BotoCoreError):
fmt = "This system does not support MD5 generation."
class MetadataRetrievalError(BotoCoreError):
fmt = "Error retrieving metadata: {error_msg}"
class UndefinedModelAttributeError(Exception):
pass
class MissingServiceIdError(UndefinedModelAttributeError):
fmt = (
"The model being used for the service {service_name} is missing the "
"serviceId metadata property, which is required."
)
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
class SSOError(BotoCoreError):
fmt = "An unspecified error happened when resolving SSO credentials"
class SSOTokenLoadError(SSOError):
fmt = "Error loading SSO Token: {error_msg}"
class UnauthorizedSSOTokenError(SSOError):
fmt = (
"The SSO session associated with this profile has expired or is "
"otherwise invalid. To refresh this SSO session run aws sso login "
"with the corresponding profile."
)
class CapacityNotAvailableError(BotoCoreError):
fmt = (
'Insufficient request capacity available.'
)
class InvalidProxiesConfigError(BotoCoreError):
fmt = (
'Invalid configuration value(s) provided for proxies_config.'
)
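# Hedged usage sketch, not part of botocore itself: typical handling of a
# ClientError raised by a service call. The bucket name is made up.
def _example_client_error_handling():
    import botocore.session
    s3 = botocore.session.get_session().create_client('s3')
    try:
        s3.head_bucket(Bucket='some-nonexistent-bucket')
    except ClientError as e:
        # The parsed error body and the operation name are always attached.
        return e.response['Error']['Code'], e.operation_name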
|
the-stack_0_7470 | """
==========================================
Using cloudknot to run pyAFQ on AWS batch:
==========================================
One of the purposes of ``pyAFQ`` is to analyze large-scale openly-available datasets,
such as those in the `Human Connectome Project <https://www.humanconnectome.org/>`_.
To analyze these datasets, large amounts of compute are needed. One way to gain access
to massive computational power is by using cloud computing. Here, we will demonstrate
how to use ``pyAFQ`` in the Amazon Web Services cloud.
We will rely on the `AWS Batch Service <https://aws.amazon.com/batch/>`_ , and we will
submit work into AWS Batch using software that our group developed called
`Cloudknot <https://nrdg.github.io/cloudknot/>`_.
"""
##########################################################################
# Import cloudknot and set the AWS region within which computations will take place. Setting a
# region is important, because if the data that you are analyzing is stored in
# `AWS S3 <https://aws.amazon.com/s3/>`_ in a particular region, it is best to run the computation
# in that region as well. That is because AWS charges for inter-region transfer of data.
import cloudknot as ck
ck.set_region('us-east-1')
##########################################################################
# Define the function to use
# --------------------------
# ``Cloudknot`` uses the single program multiple data paradigm of computing. This means that the same
# function will be run on multiple different inputs. For example, a ``pyAFQ`` processing function run
# on multiple different subjects in a dataset.
# Below, we define the function that we will use. Notice that ``Cloudknot`` functions include the
# import statements of the dependencies used. This is necessary so that ``Cloudknot`` knows
# what dependencies to install into AWS Batch to run this function.
def afq_process_subject(subject):
# define a function that each job will run
# In this case, each process does a single subject
import logging
import s3fs
# all imports must be at the top of the function
# cloudknot installs the appropriate packages from pip
import AFQ.data as afqd
import AFQ.api as api
import AFQ.definitions.mask as afm
# set logging level to your choice
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# Download the given subject to your local machine from s3
study_ixi = afqd.S3BIDSStudy(
"my_study",
"my_study_bucket",
"my_study_prefix",
subjects=[subject],
anon=False)
study_ixi.download(
"local_bids_dir",
include_derivs=["pipeline_name"])
# you can optionally provide your own segmentation file
# in this case, we look for a file with suffix 'seg'
# in the 'pipeline_name' pipeline,
# and we consider all non-zero labels to be a part of the brain
brain_mask = afm.LabelledMaskFile(
'seg', {'scope': 'pipeline_name'}, exclusive_labels=[0])
# define the api AFQ object
myafq = api.AFQ(
        "local_bids_dir",
dmriprep="pipeline_name",
brain_mask=brain_mask,
viz_backend='plotly', # this will generate both interactive html and GIFs
scalars=["dki_fa", "dki_md"])
# export_all runs the entire pipeline and creates many useful derivates
myafq.export_all()
# upload the results to some location on s3
myafq.upload_to_s3(
s3fs.S3FileSystem(),
f"my_study_bucket/my_study_prefix/derivatives/afq")
##########################################################################
# Here we provide a list of subjects that we have selected to process.
# To randomly select 3 subjects without replacement, instead do:
# subjects = [[1], [2], [3]]
# see the docstring for S3BIDSStudy.__init__ for more information
subjects = [123456, 123457, 123458]
##########################################################################
# Defining a ``Knot`` instance
# ---------------------------------
# We instantiate a :class:`ck.Knot` object, which will be used to run your jobs.
# The object is instantiated with the `'AmazonS3FullAccess'` policy, so that it can write the results
# out to S3, into a bucket that you have write permissions on.
# Setting the `bid_percentage` key-word makes AWS Batch use
# `spot EC2 instances <https://aws.amazon.com/ec2/spot/>`_ for the computation.
# This can result in substantial cost-savings, as spot compute instances can cost
# much less than on-demand instances. However, not that spot instances can also
# be evicted, so if completing all of the work is very time-sensitive, do not set this
# key-word argument. Using the `image_github_installs` key-word argument will
# install pyAFQ from GitHub. You can also specify other forks and branches to
# install from.
knot = ck.Knot(
name='afq_process_subject-201009-0',
func=afq_process_subject,
base_image='python:3.8',
image_github_installs="https://github.com/yeatmanlab/pyAFQ.git",
pars_policies=('AmazonS3FullAccess',),
bid_percentage=100)
##########################################################################
# Launching the computation
# --------------------------------
# The :meth:`map` method of the :class:`Knot` object maps each of the inputs provided
# as a sequence onto the function and executes the function on each one of them in
# parallel.
result_futures = knot.map(subjects)
##########################################################################
# Once computations have started, you can call the following
# function to view the progress of jobs::
#
# knot.view_jobs()
#
# You can also view the status of a specific job::
#
# knot.jobs[0].status
##########################################################################
# When all jobs are finished, remember to use the :meth:`clobber` method to
# destroy all of the AWS resources created by the :class:`Knot`
result_futures.result()
knot.clobber(clobber_pars=True, clobber_repo=True, clobber_image=True)
##########################################################################
# In a second :class:`Knot` object, we use a function that takes the resulting profiles of each subject
# and combines them into one csv file.
def afq_combine_profiles(dummy_argument):
from AFQ.api import download_and_combine_afq_profiles
download_and_combine_afq_profiles(
"temp", "my_study_bucket", "my_study_prefix/derivatives/afq")
knot2 = ck.Knot(
name='afq_combine_subjects-201009-0',
func=afq_combine_profiles,
base_image='python:3.8',
image_github_installs="https://github.com/yeatmanlab/pyAFQ.git",
pars_policies=('AmazonS3FullAccess',),
bid_percentage=100)
##########################################################################
# This knot is called with a dummy argument, which is not used within the function itself. The
# `job_type` key-word argument is used to signal to ``Cloudknot`` that only one job is submitted
# rather than the default array of jobs.
result_futures2 = knot2.map(["dummy_argument"], job_type="independent")
result_futures2.result()
knot2.clobber(clobber_pars=True, clobber_repo=True, clobber_image=True)
|
the-stack_0_7473 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 19-4-24 下午6:42
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection
# @File : vgg16_based_fcn.py
# @IDE: PyCharm
"""
Implement VGG16 based fcn net for semantic segmentation
"""
import collections
import tensorflow as tf
from config import global_config
from semantic_segmentation_zoo import cnn_basenet
CFG = global_config.cfg
class VGG16FCN(cnn_basenet.CNNBaseModel):
"""
VGG 16 based fcn net for semantic segmentation
"""
    def __init__(self, phase):
        """
        :param phase: phase flag, either a python string or a tf string
            tensor; 'train' enables training-specific behaviour
        """
super(VGG16FCN, self).__init__()
self._phase = phase
self._is_training = self._is_net_for_training()
self._net_intermediate_results = collections.OrderedDict()
def _is_net_for_training(self):
"""
        check whether the net is being used for training
        :return: a boolean tf tensor, True when the phase is 'train'
"""
if isinstance(self._phase, tf.Tensor):
phase = self._phase
else:
phase = tf.constant(self._phase, dtype=tf.string)
return tf.equal(phase, tf.constant('train', dtype=tf.string))
def _vgg16_conv_stage(self, input_tensor, k_size, out_dims, name,
stride=1, pad='SAME', need_layer_norm=True):
"""
stack conv and activation in vgg16
:param input_tensor:
:param k_size:
:param out_dims:
:param name:
:param stride:
:param pad:
:param need_layer_norm:
:return:
"""
with tf.variable_scope(name):
conv = self.conv2d(
inputdata=input_tensor, out_channel=out_dims,
kernel_size=k_size, stride=stride,
use_bias=False, padding=pad, name='conv'
)
if need_layer_norm:
bn = self.layerbn(inputdata=conv, is_training=self._is_training, name='bn')
relu = self.relu(inputdata=bn, name='relu')
else:
relu = self.relu(inputdata=conv, name='relu')
return relu
def _decode_block(self, input_tensor, previous_feats_tensor,
out_channels_nums, name, kernel_size=4,
stride=2, use_bias=False,
previous_kernel_size=4, need_activate=True):
"""
:param input_tensor:
:param previous_feats_tensor:
:param out_channels_nums:
:param kernel_size:
:param previous_kernel_size:
:param use_bias:
:param stride:
:param name:
:return:
"""
with tf.variable_scope(name_or_scope=name):
deconv_weights_stddev = tf.sqrt(
tf.divide(tf.constant(2.0, tf.float32),
tf.multiply(tf.cast(previous_kernel_size * previous_kernel_size, tf.float32),
tf.cast(tf.shape(input_tensor)[3], tf.float32)))
)
deconv_weights_init = tf.truncated_normal_initializer(
mean=0.0, stddev=deconv_weights_stddev)
deconv = self.deconv2d(
inputdata=input_tensor, out_channel=out_channels_nums, kernel_size=kernel_size,
stride=stride, use_bias=use_bias, w_init=deconv_weights_init,
name='deconv'
)
deconv = self.layerbn(inputdata=deconv, is_training=self._is_training, name='deconv_bn')
deconv = self.relu(inputdata=deconv, name='deconv_relu')
fuse_feats = tf.concat(
[previous_feats_tensor, deconv],
axis=-1, name='fuse_feats'
)
conv_weights_stddev = tf.sqrt(
tf.divide(tf.constant(2.0, tf.float32),
tf.multiply(tf.cast(kernel_size * kernel_size, tf.float32),
tf.cast(tf.shape(fuse_feats)[3], tf.float32)))
)
conv_weights_init = tf.truncated_normal_initializer(
mean=0.0, stddev=conv_weights_stddev)
fuse_feats = self.conv2d(
inputdata=fuse_feats,
out_channel=out_channels_nums,
kernel_size=3,
padding='SAME',
stride=1,
w_init=conv_weights_init,
use_bias=use_bias,
name='fuse_conv'
)
if need_activate:
fuse_feats = self.layerbn(
inputdata=fuse_feats, is_training=self._is_training, name='fuse_gn'
)
fuse_feats = self.relu(inputdata=fuse_feats, name='fuse_relu')
return fuse_feats
def _vgg16_fcn_encode(self, input_tensor, name):
"""
:param input_tensor:
:param name:
:return:
"""
with tf.variable_scope(name_or_scope=name):
# encode stage 1
conv_1_1 = self._vgg16_conv_stage(
input_tensor=input_tensor, k_size=3,
out_dims=64, name='conv1_1',
need_layer_norm=True
)
conv_1_2 = self._vgg16_conv_stage(
input_tensor=conv_1_1, k_size=3,
out_dims=64, name='conv1_2',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_1_share'] = {
'data': conv_1_2,
'shape': conv_1_2.get_shape().as_list()
}
# encode stage 2
pool1 = self.maxpooling(
inputdata=conv_1_2, kernel_size=2,
stride=2, name='pool1'
)
conv_2_1 = self._vgg16_conv_stage(
input_tensor=pool1, k_size=3,
out_dims=128, name='conv2_1',
need_layer_norm=True
)
conv_2_2 = self._vgg16_conv_stage(
input_tensor=conv_2_1, k_size=3,
out_dims=128, name='conv2_2',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_2_share'] = {
'data': conv_2_2,
'shape': conv_2_2.get_shape().as_list()
}
# encode stage 3
pool2 = self.maxpooling(
inputdata=conv_2_2, kernel_size=2,
stride=2, name='pool2'
)
conv_3_1 = self._vgg16_conv_stage(
input_tensor=pool2, k_size=3,
out_dims=256, name='conv3_1',
need_layer_norm=True
)
conv_3_2 = self._vgg16_conv_stage(
input_tensor=conv_3_1, k_size=3,
out_dims=256, name='conv3_2',
need_layer_norm=True
)
conv_3_3 = self._vgg16_conv_stage(
input_tensor=conv_3_2, k_size=3,
out_dims=256, name='conv3_3',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_3_share'] = {
'data': conv_3_3,
'shape': conv_3_3.get_shape().as_list()
}
# encode stage 4
pool3 = self.maxpooling(
inputdata=conv_3_3, kernel_size=2,
stride=2, name='pool3'
)
conv_4_1 = self._vgg16_conv_stage(
input_tensor=pool3, k_size=3,
out_dims=512, name='conv4_1',
need_layer_norm=True
)
conv_4_2 = self._vgg16_conv_stage(
input_tensor=conv_4_1, k_size=3,
out_dims=512, name='conv4_2',
need_layer_norm=True
)
conv_4_3 = self._vgg16_conv_stage(
input_tensor=conv_4_2, k_size=3,
out_dims=512, name='conv4_3',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_4_share'] = {
'data': conv_4_3,
'shape': conv_4_3.get_shape().as_list()
}
# encode stage 5 for binary segmentation
pool4 = self.maxpooling(
inputdata=conv_4_3, kernel_size=2,
stride=2, name='pool4'
)
conv_5_1_binary = self._vgg16_conv_stage(
input_tensor=pool4, k_size=3,
out_dims=512, name='conv5_1_binary',
need_layer_norm=True
)
conv_5_2_binary = self._vgg16_conv_stage(
input_tensor=conv_5_1_binary, k_size=3,
out_dims=512, name='conv5_2_binary',
need_layer_norm=True
)
conv_5_3_binary = self._vgg16_conv_stage(
input_tensor=conv_5_2_binary, k_size=3,
out_dims=512, name='conv5_3_binary',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_5_binary'] = {
'data': conv_5_3_binary,
'shape': conv_5_3_binary.get_shape().as_list()
}
# encode stage 5 for instance segmentation
conv_5_1_instance = self._vgg16_conv_stage(
input_tensor=pool4, k_size=3,
out_dims=512, name='conv5_1_instance',
need_layer_norm=True
)
conv_5_2_instance = self._vgg16_conv_stage(
input_tensor=conv_5_1_instance, k_size=3,
out_dims=512, name='conv5_2_instance',
need_layer_norm=True
)
conv_5_3_instance = self._vgg16_conv_stage(
input_tensor=conv_5_2_instance, k_size=3,
out_dims=512, name='conv5_3_instance',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_5_instance'] = {
'data': conv_5_3_instance,
'shape': conv_5_3_instance.get_shape().as_list()
}
return
def _vgg16_fcn_decode(self, name):
"""
:return:
"""
with tf.variable_scope(name):
# decode part for binary segmentation
with tf.variable_scope(name_or_scope='binary_seg_decode'):
decode_stage_5_binary = self._net_intermediate_results['encode_stage_5_binary']['data']
decode_stage_4_fuse = self._decode_block(
input_tensor=decode_stage_5_binary,
previous_feats_tensor=self._net_intermediate_results['encode_stage_4_share']['data'],
name='decode_stage_4_fuse', out_channels_nums=512, previous_kernel_size=3
)
decode_stage_3_fuse = self._decode_block(
input_tensor=decode_stage_4_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_3_share']['data'],
name='decode_stage_3_fuse', out_channels_nums=256
)
decode_stage_2_fuse = self._decode_block(
input_tensor=decode_stage_3_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_2_share']['data'],
name='decode_stage_2_fuse', out_channels_nums=128
)
decode_stage_1_fuse = self._decode_block(
input_tensor=decode_stage_2_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_1_share']['data'],
name='decode_stage_1_fuse', out_channels_nums=64
)
binary_final_logits_conv_weights_stddev = tf.sqrt(
tf.divide(tf.constant(2.0, tf.float32),
tf.multiply(4.0 * 4.0,
tf.cast(tf.shape(decode_stage_1_fuse)[3], tf.float32)))
)
binary_final_logits_conv_weights_init = tf.truncated_normal_initializer(
mean=0.0, stddev=binary_final_logits_conv_weights_stddev)
binary_final_logits = self.conv2d(
inputdata=decode_stage_1_fuse, out_channel=CFG.TRAIN.CLASSES_NUMS,
kernel_size=1, use_bias=False,
w_init=binary_final_logits_conv_weights_init,
name='binary_final_logits')
self._net_intermediate_results['binary_segment_logits'] = {
'data': binary_final_logits,
'shape': binary_final_logits.get_shape().as_list()
}
with tf.variable_scope(name_or_scope='instance_seg_decode'):
decode_stage_5_instance = self._net_intermediate_results['encode_stage_5_instance']['data']
decode_stage_4_fuse = self._decode_block(
input_tensor=decode_stage_5_instance,
previous_feats_tensor=self._net_intermediate_results['encode_stage_4_share']['data'],
name='decode_stage_4_fuse', out_channels_nums=512, previous_kernel_size=3)
decode_stage_3_fuse = self._decode_block(
input_tensor=decode_stage_4_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_3_share']['data'],
name='decode_stage_3_fuse', out_channels_nums=256)
decode_stage_2_fuse = self._decode_block(
input_tensor=decode_stage_3_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_2_share']['data'],
name='decode_stage_2_fuse', out_channels_nums=128)
decode_stage_1_fuse = self._decode_block(
input_tensor=decode_stage_2_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_1_share']['data'],
name='decode_stage_1_fuse', out_channels_nums=64, need_activate=False)
self._net_intermediate_results['instance_segment_logits'] = {
'data': decode_stage_1_fuse,
'shape': decode_stage_1_fuse.get_shape().as_list()
}
def build_model(self, input_tensor, name, reuse=False):
"""
:param input_tensor:
:param name:
:param reuse:
:return:
"""
with tf.variable_scope(name_or_scope=name, reuse=reuse):
# vgg16 fcn encode part
self._vgg16_fcn_encode(input_tensor=input_tensor, name='vgg16_encode_module')
# vgg16 fcn decode part
self._vgg16_fcn_decode(name='vgg16_decode_module')
return self._net_intermediate_results
if __name__ == '__main__':
"""
test code
"""
test_in_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input')
model = VGG16FCN(phase='train')
ret = model.build_model(test_in_tensor, name='vgg16fcn')
for layer_name, layer_info in ret.items():
print('layer name: {:s} shape: {}'.format(layer_name, layer_info['shape']))
|
the-stack_0_7474 | import chainer
import chainer.functions as F
import chainer.links as L
class SEBlock(chainer.Chain):
"""A squeeze-and-excitation block.
This block is part of squeeze-and-excitation networks. Channel-wise
multiplication weights are inferred from and applied to input feature map.
Please refer to `the original paper
<https://arxiv.org/pdf/1709.01507.pdf>`_ for more details.
.. seealso::
:class:`chainercv.links.model.senet.SEResNet`
Args:
n_channel (int): The number of channels of the input and output array.
ratio (int): Reduction ratio of :obj:`n_channel` to the number of
hidden layer units.
"""
def __init__(self, n_channel, ratio=16):
super(SEBlock, self).__init__()
reduction_size = n_channel // ratio
with self.init_scope():
self.down = L.Linear(n_channel, reduction_size)
self.up = L.Linear(reduction_size, n_channel)
def forward(self, u):
B, C, H, W = u.shape
z = F.average(u, axis=(2, 3))
x = F.relu(self.down(z))
x = F.sigmoid(self.up(x))
x = F.broadcast_to(x, (H, W, B, C))
x = x.transpose((2, 3, 0, 1))
return u * x
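# Minimal usage sketch (illustrative shapes and values; not part of the original module):
if __name__ == '__main__':
    import numpy as np
    block = SEBlock(n_channel=64, ratio=16)
    u = chainer.Variable(np.random.rand(2, 64, 8, 8).astype(np.float32))
    y = block(u)  # same (B, C, H, W) shape as the input, channels re-weighted by factors in (0, 1)
    print(y.shape)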
|
the-stack_0_7476 | from __future__ import print_function
import os.path, json, requests, logging, datetime, argparse, sys
from requests.packages.urllib3.exceptions import InsecureRequestWarning
#suppress insecure certificate warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#API headers and url
HEADERS = {'content-type': 'application/json'}
TIME_ZONE_CHOICE = [
"Africa/Cairo", "Asia/Dhaka", "Asia/Yekaterinburg","Europe/London",
"Africa/Casablanca", "Asia/Hong_Kong", "Atlantic/Azores", "Europe/Madrid",
"Africa/Harare", "Asia/Irkutsk", "Atlantic/Cape_Verde", "Europe/Moscow",
"Africa/Kinshasa", "Asia/Kabul", "Australia/Adelaide", "Europe/Prague",
"Africa/Nairobi", "Asia/Karachi", "Australia/Brisbane", "Europe/Rome",
"America/Buenos_Aires", "Asia/Katmandu", "Australia/Darwin", "Europe/Warsaw",
"America/Caracas", "Asia/Krasnoyarsk", "Australia/Hobart", "GMT",
"America/Chihuahua", "Asia/Magadan", "Australia/Perth", "Pacific/Auckland",
"America/Lima", "Asia/Muscat", "Australia/Sydney", "Pacific/Fiji"
"America/Mexico_City", "Asia/Rangoon", "Canada/Atlantic", "Pacific/Guam",
"America/Panama", "Asia/Riyadh", "Canada/Central", "Pacific/Midway",
"America/Phoenix", "Asia/Seoul", "Canada/Newfoundland", "Pacific/Tongatapu",
"America/Santiago", "Asia/Singapore", "Etc/UTC+6", "US/Alaska",
"America/Sao_Paulo", "Asia/Taipei", "Etc/UTC-12", "US/Central",
"Asia/Almaty", "Asia/Tehran", "Etc/UTC-2", "US/East-Indiana",
"Asia/Baghdad", "Asia/Tel_Aviv", "Etc/UTC-3", "US/Eastern",
"Asia/Baku", "Asia/Tokyo", "Europe/Athens", "US/Hawaii",
"Asia/Bangkok", "Asia/Vladivostok", "Europe/Bucharest", "US/Mountain",
"Asia/Calcutta", "Asia/Yakutsk", "Europe/Helsinki", "US/Pacific"
]
#exit code standard:
#0 = OK
#1 = argument parser issue
#2 = environment issue such as invalid environment id, invalid password, or invalid scope
#3 = timeout
EXIT_CODE = 0
def get_api_endpoint(target_dc):
if target_dc == "defender-us-denver":
return "https://publicapi.alertlogic.net/api/lm/v1/"
elif target_dc == "defender-us-ashburn":
return "https://publicapi.alertlogic.com/api/lm/v1/"
elif target_dc == "defender-uk-newport":
return "https://publicapi.alertlogic.co.uk/api/lm/v1/"
else:
return False
def get_source_s3(token, endpoint, target_s3, target_cid):
API_ENDPOINT = endpoint + target_cid + "/sources/" + target_s3
REQUEST = requests.get(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Retrieving S3 Log source info status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 200:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["id"] = "n/a"
return RESULT
def del_source_s3(token, endpoint, target_s3, target_cid):
API_ENDPOINT = endpoint + target_cid + "/sources/" + target_s3
REQUEST = requests.delete(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Delete S3 log source status : " + str(REQUEST.status_code), str(REQUEST.reason))
def del_s3_policy(token, endpoint, target_policy, target_cid):
API_ENDPOINT = endpoint + target_cid + "/policies/" + target_policy
REQUEST = requests.delete(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Delete S3 collection policy status : " + str(REQUEST.status_code), str(REQUEST.reason))
def del_credentials(token, endpoint, target_cred, target_cid):
API_ENDPOINT = endpoint + target_cid + "/credentials/" + target_cred
REQUEST = requests.delete(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Delete credentials status : " + str(REQUEST.status_code), str(REQUEST.reason))
def post_credentials(token, endpoint, payload, target_cid):
API_ENDPOINT = endpoint + target_cid + "/credentials/iam_role"
REQUEST = requests.post(API_ENDPOINT, headers=HEADERS, auth=(token,''), data=payload)
print ("Create Credentials status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 201:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["iam_role"] = {}
RESULT["iam_role"]["id"] = "n/a"
return RESULT
def prep_credentials(iam_arn, iam_ext_id, cred_name):
#Setup dictionary for credentials payload
RESULT = {}
RESULT["iam_role"] = {}
RESULT["iam_role"]["arn"] = str(iam_arn)
RESULT["iam_role"]["external_id"] = str(iam_ext_id)
RESULT["iam_role"]["name"] = str(cred_name)
return RESULT
def post_s3_policy(token, endpoint, payload, target_cid):
API_ENDPOINT = endpoint + target_cid + "/policies/s3"
REQUEST = requests.post(API_ENDPOINT, headers=HEADERS, auth=(token,''), data=payload)
print ("Create S3 policy status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 201:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["id"] = "n/a"
return RESULT
def prep_s3_policy(s3_policy_name, s3_policy_type):
#Setup dictionary for s3 collection payload
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["name"] = str(s3_policy_name)
RESULT["s3"]["multiline"] = {}
RESULT["s3"]["multiline"]["is_multiline"] = False
if (s3_policy_type == "MsSQL"):
RESULT["s3"]["template_id"] = "3A943EDF-FB2C-1004-963D-005056853D45"
elif (s3_policy_type == "ELB"):
RESULT["s3"]["template_id"] = "A3069F39-FB68-1004-B9EA-005056853D45"
elif (s3_policy_type == "Redshift_Activity"):
RESULT["s3"]["template_id"] = "7B85CAC3-FB68-1004-B9EA-005056853D45"
elif (s3_policy_type == "Redshift_Con"):
RESULT["s3"]["template_id"] = "74173391-FB82-1004-B9EA-005056853D45"
elif (s3_policy_type == "Redshift_User"):
RESULT["s3"]["template_id"] = "D9675D68-FB93-1004-B9EA-005056853D45"
elif (s3_policy_type == "S3_Access"):
RESULT["s3"]["template_id"] = "AB51CD45-FB68-1004-B9EA-005056853D45"
return RESULT
def post_s3_source(token, endpoint, payload, target_cid):
API_ENDPOINT = endpoint + target_cid + "/sources/s3"
REQUEST = requests.post(API_ENDPOINT, headers=HEADERS, auth=(token,''), data=payload)
print ("Create S3 source status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 201:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["id"] = "n/a"
return RESULT
def prep_s3_source(source_name, s3_bucket_name, file_pattern, time_zone, credential_id, policy_id):
    #Setup dictionary for s3 log source payload
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["name"] = str(source_name)
RESULT["s3"]["enabled"] = True
RESULT["s3"]["bucket"] = s3_bucket_name
RESULT["s3"]["file_pattern"] = file_pattern
RESULT["s3"]["time_zone"] = time_zone
RESULT["s3"]["credential_id"] = credential_id
RESULT["s3"]["policy_id"] = policy_id
return RESULT
#MAIN MODULE
if __name__ == '__main__':
EXIT_CODE=0
#Prepare parser and argument
parent_parser = argparse.ArgumentParser()
subparsers = parent_parser.add_subparsers(help="Select mode", dest="mode")
#Add parser for both ADD and DELETE mode
add_parser = subparsers.add_parser("ADD", help="Add CloudTrail collection")
del_parser = subparsers.add_parser("DEL", help="Delete CloudTrail collection")
#Parser argument for Add scope
add_parser.add_argument("--key", required=True, help="User Key for Alert Logic Log Manager API Authentication")
add_parser.add_argument("--cid", required=True, help="Alert Logic Customer CID as target")
add_parser.add_argument("--iam", required=True, help="Cross Account IAM role arn")
add_parser.add_argument("--ext", required=True, help="External ID specified in IAM role trust relationship")
add_parser.add_argument("--cred", required=True, help="Credential name, free form label, not visible in Alert Logic UI")
add_parser.add_argument("--name", required=True, help="S3 source name, free form label")
add_parser.add_argument("--pol", required=True, help="S3 Collection Policy name, free form label")
add_parser.add_argument("--type", required=True, help="S3 Collection Policy Template", choices=["MsSQL", "ELB", "Redshift_Activity", "Redshift_Con", "Redshift_User", "S3_Access"])
add_parser.add_argument("--s3", required=True, help="S3 bucket name as target for log collection")
add_parser.add_argument("--rgex", required=False, help="File name or Pattern, will use .* if not specified", default=".*")
add_parser.add_argument("--tz", required=False, help="Time zone (https://docs.alertlogic.com/developer/content/z-sandbox/apitest/endpoint/logmgrapi/commonparameters.htm)", choices=TIME_ZONE_CHOICE, default="US/Central")
add_parser.add_argument("--int", required=False, help="Collection interval (in seconds), will use 300 seconds if not specified", default=300)
add_parser.add_argument("--dc", required=True, help="Alert Logic Data center assignment, i.e. defender-us-denver, defender-us-ashburn or defender-uk-newport")
#Parser argument for Delete scope
del_parser.add_argument("--key", required=True, help="User Key for Alert Logic Log Manager API Authentication")
del_parser.add_argument("--cid", required=True, help="Alert Logic Customer CID as target")
del_parser.add_argument("--uid", required=True, help="S3 log source ID that you wish to delete")
del_parser.add_argument("--dc", required=True, help="Alert Logic Data center assignment, i.e. defender-us-denver, defender-us-ashburn or defender-uk-newport")
try:
args = parent_parser.parse_args()
except:
EXIT_CODE = 1
sys.exit(EXIT_CODE)
#Set argument to variables
if args.mode == "ADD":
print ("\n### Starting script - " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + " - Deployment Mode = ADD ###\n")
APIKEY = args.key
TARGET_CID = args.cid
TARGET_IAM_ROLE_ARN = args.iam
TARGET_EXT_ID = args.ext
TARGET_CRED_NAME = args.cred
TARGET_NAME = args.name
TARGET_S3_POL = args.pol
TARGET_S3_NAME = args.s3
TARGET_S3_TYPE = args.type
TARGET_S3_REGEX = args.rgex
TARGET_TIME_ZONE = args.tz
TARGET_INTERVAL = args.int
TARGET_DEFENDER = args.dc
#get API endpoint
ALERT_LOGIC_LM = get_api_endpoint(TARGET_DEFENDER)
if ALERT_LOGIC_LM != False:
#Create credentials using the IAM role ARN and external ID
print ("### Creating IAM Role Link ###")
CRED_PAYLOAD = prep_credentials(TARGET_IAM_ROLE_ARN, TARGET_EXT_ID, TARGET_CRED_NAME)
CRED_RESULT = post_credentials(APIKEY, ALERT_LOGIC_LM, str(json.dumps(CRED_PAYLOAD, indent=4)), TARGET_CID)
CRED_ID = str(CRED_RESULT["iam_role"]["id"])
if CRED_ID != "n/a":
print ("Cred ID : " + CRED_ID)
#Prep the S3 Collection Policy payload
print ("### Creating S3 Collection Policy ###")
S3_POLICY_PAYLOAD = prep_s3_policy(TARGET_S3_POL, TARGET_S3_TYPE)
S3_POLICY_RESULT = post_s3_policy(APIKEY, ALERT_LOGIC_LM, str(json.dumps(S3_POLICY_PAYLOAD, indent=4)), TARGET_CID)
S3_POLICY_ID = str(S3_POLICY_RESULT["s3"]["id"])
if S3_POLICY_ID != "n/a":
print ("S3 Collection Policy ID : " + S3_POLICY_ID)
#Prep the S3 Log Source payload
print ("### Creating S3 Log Source ###")
S3_SOURCE_PAYLOAD = prep_s3_source(TARGET_NAME, TARGET_S3_NAME, TARGET_S3_REGEX, TARGET_TIME_ZONE, CRED_ID, S3_POLICY_ID)
S3_SOURCE_RESULT = post_s3_source(APIKEY, ALERT_LOGIC_LM, str(json.dumps(S3_SOURCE_PAYLOAD, indent=4)), TARGET_CID)
S3_SOURCE_ID = str(S3_SOURCE_RESULT["s3"]["id"])
if S3_SOURCE_ID != "n/a":
print ("S3 Source ID : " + S3_SOURCE_ID)
else:
EXIT_CODE=2
print ("### Failed to create S3 Log Source, see response code + reason above, stopping .. ###")
else:
EXIT_CODE=2
print ("### Failed to create S3 Collection Policy, see response code + reason above, stopping .. ###")
else:
EXIT_CODE=2
print ("### Failed to create credentials, see response code + reason above, stopping .. ###")
else:
EXIT_CODE=2
print ("Invalid data center assignment, use -h for more details, stopping ...")
elif args.mode == "DEL":
print ("\n### Starting script - " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + " - Deployment Mode = DEL ###\n")
APIKEY = args.key
TARGET_CID = args.cid
TARGET_S3_SOURCE_ID = args.uid
TARGET_DEFENDER = args.dc
#get API endpoint
ALERT_LOGIC_LM = get_api_endpoint(TARGET_DEFENDER)
S3_SOURCE_RESULT = get_source_s3(APIKEY, ALERT_LOGIC_LM, TARGET_S3_SOURCE_ID, TARGET_CID)
if S3_SOURCE_RESULT["s3"]["id"] != "n/a":
#Get the credentials ID and Policy ID
TARGET_CRED_ID = S3_SOURCE_RESULT["s3"]["credential_id"]
TARGET_POLICY_ID = S3_SOURCE_RESULT["s3"]["policy_id"]
#Delete S3 log source
del_source_s3(APIKEY, ALERT_LOGIC_LM, TARGET_S3_SOURCE_ID, TARGET_CID)
#Delete S3 collection policy
del_s3_policy(APIKEY, ALERT_LOGIC_LM, TARGET_POLICY_ID, TARGET_CID)
#Delete S3 credentials
del_credentials(APIKEY, ALERT_LOGIC_LM, TARGET_CRED_ID, TARGET_CID)
else:
EXIT_CODE=2
print ("Failed to find the S3 log source ID, see response code + reason above, stopping ..")
print ("\n### Script stopped - " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + "###\n")
sys.exit(EXIT_CODE) |
the-stack_0_7478 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) Flo Developers 2013-2018
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
#create a mempool tx that will be evicted
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.01}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
MempoolLimitTest().main()
|
the-stack_0_7480 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
"""Multi-class Focal loss implementation.
Args:
gamma (float): The larger the gamma, the smaller
the loss weight of easier samples.
weight (float): A manual rescaling weight given to each
class.
ignore_index (int): Specifies a target value that is ignored
and does not contribute to the input gradient.
"""
def __init__(self, gamma=2, weight=None, ignore_index=-100):
super().__init__()
self.gamma = gamma
self.weight = weight
self.ignore_index = ignore_index
def forward(self, input, target):
logit = F.log_softmax(input, dim=1)
pt = torch.exp(logit)
logit = (1 - pt)**self.gamma * logit
loss = F.nll_loss(
logit, target, self.weight, ignore_index=self.ignore_index)
return loss
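# Minimal usage sketch (illustrative shapes; not part of the original module):
if __name__ == '__main__':
    criterion = FocalLoss(gamma=2)
    logits = torch.randn(8, 5)          # batch of 8 samples, 5 classes
    target = torch.randint(0, 5, (8,))  # integer class labels
    print(criterion(logits, target))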
|
the-stack_0_7481 | from __future__ import print_function
import pandas as pd
import numpy as np
import sys
from os import listdir, getcwd
from os.path import isdir, join, dirname, abspath
from pandas import concat
from nilmtk.utils import get_module_directory, check_directory_exists
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
"""
DATASET STRUCTURE:
------------------
On extracting all the dataset values, we should arrive at a directory structure similar to the
one described below.
ECO Dataset will have a folder '<i>_sm_csv' and '<i>_plug_csv' where i is the building no.
Originally, the expected folder structure was:
- <i>_sm_csv has a folder <i>
- <i>_plug_csv has folders 01, 02,....<n> where n is the plug numbers.
This version also supports the following structure, which can be created by unpacking the
ZIP files uniformly, creating a folder for each one:
- <i>_sm_csv has a folder <i>
- <i>_plug_csv has a folder <i>, and <i>_plug_csv/<i> has folders 01, 02,....<n>,
where n is the plug numbers.
Each folder has one CSV file per day, and each day's CSV file contains
86400 entries.
"""
plugs_column_name = {1: ('power', 'active')}
def convert_eco(dataset_loc, hdf_filename, timezone):
"""
Parameters:
-----------
dataset_loc: str
The root directory where the dataset is located.
hdf_filename: str
The location where the hdf_filename is present.
The directory location has to contain the
hdf5file name for the converter to work.
timezone: str
specifies the timezone of the dataset.
"""
# Creating a new HDF File
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='blosc')
check_directory_exists(dataset_loc)
directory_list = [i for i in listdir(dataset_loc) if '.txt' not in i]
directory_list.sort()
print(directory_list)
found_any_sm = False
found_any_plug = False
# Traversing every folder
for folder in directory_list:
if folder[0] == '.' or folder[-3:] == '.h5':
print('Skipping ', folder)
continue
#Building number and meter_flag
building_no = int(folder[:2])
meter_flag = None
if 'sm_csv' in folder:
meter_flag = 'sm'
elif 'plugs' in folder:
meter_flag = 'plugs'
else:
print('Skipping folder', folder)
continue
print('Computing for folder', folder)
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
dir_list.sort()
if meter_flag == 'plugs' and len(dir_list) < 3:
# Try harder to find the subfolders
folder = join(folder, folder[:2])
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
print('Current dir list:', dir_list)
for fl in dir_list:
print('Computing for folder ', fl)
fl_dir_list = [i for i in listdir(join(dataset_loc,folder,fl)) if '.csv' in i]
fl_dir_list.sort()
if meter_flag == 'sm':
for fi in fl_dir_list:
found_any_sm = True
df = pd.read_csv(join(dataset_loc,folder,fl,fi), names=[i for i in range(1,17)], dtype=np.float32)
for phase in range(1,4):
key = str(Key(building=building_no, meter=phase))
df_phase = df.loc[:,[1+phase, 5+phase, 8+phase, 13+phase]]
# get reactive power
power = df_phase.loc[:, (1+phase, 13+phase)].values
reactive = power[:,0] * np.tan(power[:,1] * np.pi / 180)
df_phase['Q'] = reactive
df_phase.index = pd.DatetimeIndex(start=fi[:-4], freq='s', periods=86400, tz='GMT')
df_phase = df_phase.tz_convert(timezone)
sm_column_name = {
1+phase:('power', 'active'),
5+phase:('current', ''),
8+phase:('voltage', ''),
13+phase:('phase_angle', ''),
'Q': ('power', 'reactive'),
}
df_phase.columns = pd.MultiIndex.from_tuples(
sm_column_name[col] for col in df_phase.columns
)
power_active = df_phase['power', 'active']
tmp_before = np.size(power_active)
df_phase = df_phase[power_active != -1]
power_active = df_phase['power', 'active']
tmp_after = np.size(power_active)
if tmp_before != tmp_after:
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
df_phase.columns.set_names(LEVEL_NAMES, inplace=True)
if not key in store:
store.put(key, df_phase, format='Table')
else:
store.append(key, df_phase, format='Table')
store.flush()
print('Building', building_no, ', Meter no.', phase,
'=> Done for ', fi[:-4])
else:
#Meter number to be used in key
meter_num = int(fl) + 3
key = str(Key(building=building_no, meter=meter_num))
current_folder = join(dataset_loc,folder,fl)
if not fl_dir_list:
raise RuntimeError("No CSV file found in " + current_folder)
                #Getting dataframe for each csv file separately
for fi in fl_dir_list:
found_any_plug = True
df = pd.read_csv(join(current_folder, fi), names=[1], dtype=np.float64)
df.index = pd.DatetimeIndex(start=fi[:-4].replace('.', ':'), freq='s', periods=86400, tz = 'GMT')
df.columns = pd.MultiIndex.from_tuples(plugs_column_name.values())
df = df.tz_convert(timezone)
df.columns.set_names(LEVEL_NAMES, inplace=True)
tmp_before = np.size(df.power.active)
df = df[df.power.active != -1]
tmp_after = np.size(df.power.active)
if (tmp_before != tmp_after):
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
# If table not present in hdf5, create or else append to existing data
if not key in store:
store.put(key, df, format='Table')
print('Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4])
else:
store.append(key, df, format='Table')
store.flush()
print('Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4])
if not found_any_plug or not found_any_sm:
raise RuntimeError('The files were not found! Please check the folder structure. Extract each ZIP file into a folder with its base name (e.g. extract "01_plugs_csv.zip" into a folder named "01_plugs_csv", etc.)')
print("Data storage completed.")
store.close()
    # Adding the metadata to the HDF5 file
print("Proceeding to Metadata conversion...")
meta_path = join(
get_module_directory(),
'dataset_converters',
'eco',
'metadata'
)
convert_yaml_to_hdf5(meta_path, hdf_filename)
print("Completed Metadata conversion.")
|
the-stack_0_7484 | import re
class StringCal:
def __init__(self, formula:str, **variables):
self.formula = formula.replace(' ', '')
self.variables = variables
def get_coefficient(self) -> dict:
coefficients = {}
term = ""
for f in self.formula + ';':
variable_term = re.compile('(\W?[0-9]*)([a-zA-Z]+)')
constant_term = re.compile('(\W?[0-9]+)(\W)')
is_coefficientOne = re.compile('(\W?)([a-zA-Z]+)')
term += f
variable_term = variable_term.match(term)
constant_term = constant_term.match(term)
if variable_term == None and constant_term == None:
continue
elif variable_term != None:
variable = variable_term.group(2)
coefficient = variable_term.group(1)
if is_coefficientOne.match(variable_term.group()):
coefficient += '1'
try:
coefficients[variable] = eval(str(coefficients[variable]) + coefficient)
except KeyError:
coefficients[variable] = int(coefficient)
term = ""
elif constant_term != None:
constant = constant_term.group(1)
try:
coefficients['constant'] = eval(str(coefficients['constant']) + constant)
except KeyError:
coefficients['constant'] = int(constant)
term = constant_term.group(2)
return coefficients
def simplify(self) -> str:
simplified_formula = ""
no_plus_minus = re.compile('[0-9]+')
coefficients = self.get_coefficient()
for variable in coefficients:
coefficient = str(coefficients[variable])
if no_plus_minus.match(coefficient) != None and simplified_formula != '':
coefficient = '+' + coefficient
if variable == 'constant':
simplified_formula += coefficient
else:
simplified_formula += coefficient + variable
return simplified_formula
def define(self, **kwargs) -> int:
formula = self.formula
if kwargs != {}:
self.variables = kwargs
for var in self.variables:
var_value = str(self.variables[var])
formula = formula.replace(var, '*' + var_value)
return eval(formula)
if __name__ == '__main__':
formula = StringCal(formula='2x+3x+1-3+3y',x=1,y=1)
print(formula.define())
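    # Extra illustrative checks for the formula defined above:
    print(formula.get_coefficient())  # -> {'x': 5, 'constant': -2, 'y': 3}
    print(formula.simplify())         # -> 5x-2+3y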
|
the-stack_0_7488 | # Copyright (c) 2018, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
"""Merkle trees, branches, proofs and roots."""
from asyncio import Event
from math import ceil, log
from lbry.wallet.server.hash import double_sha256
class Merkle:
"""Perform merkle tree calculations on binary hashes using a given hash
function.
If the hash count is not even, the final hash is repeated when
calculating the next merkle layer up the tree.
"""
def __init__(self, hash_func=double_sha256):
self.hash_func = hash_func
def tree_depth(self, hash_count):
return self.branch_length(hash_count) + 1
def branch_length(self, hash_count):
"""Return the length of a merkle branch given the number of hashes."""
if not isinstance(hash_count, int):
raise TypeError('hash_count must be an integer')
if hash_count < 1:
raise ValueError('hash_count must be at least 1')
return ceil(log(hash_count, 2))
def branch_and_root(self, hashes, index, length=None):
"""Return a (merkle branch, merkle_root) pair given hashes, and the
index of one of those hashes.
"""
hashes = list(hashes)
if not isinstance(index, int):
raise TypeError('index must be an integer')
# This also asserts hashes is not empty
if not 0 <= index < len(hashes):
raise ValueError(f"index '{index}/{len(hashes)}' out of range")
natural_length = self.branch_length(len(hashes))
if length is None:
length = natural_length
else:
if not isinstance(length, int):
raise TypeError('length must be an integer')
if length < natural_length:
raise ValueError('length out of range')
hash_func = self.hash_func
branch = []
for _ in range(length):
if len(hashes) & 1:
hashes.append(hashes[-1])
branch.append(hashes[index ^ 1])
index >>= 1
hashes = [hash_func(hashes[n] + hashes[n + 1])
for n in range(0, len(hashes), 2)]
return branch, hashes[0]
def root(self, hashes, length=None):
"""Return the merkle root of a non-empty iterable of binary hashes."""
branch, root = self.branch_and_root(hashes, 0, length)
return root
def root_from_proof(self, hash, branch, index):
"""Return the merkle root given a hash, a merkle branch to it, and
its index in the hashes array.
branch is an iterable sorted deepest to shallowest. If the
returned root is the expected value then the merkle proof is
verified.
The caller should have confirmed the length of the branch with
branch_length(). Unfortunately this is not easily done for
bitcoin transactions as the number of transactions in a block
is unknown to an SPV client.
"""
hash_func = self.hash_func
for elt in branch:
if index & 1:
hash = hash_func(elt + hash)
else:
hash = hash_func(hash + elt)
index >>= 1
if index:
raise ValueError('index out of range for branch')
return hash
def level(self, hashes, depth_higher):
"""Return a level of the merkle tree of hashes the given depth
higher than the bottom row of the original tree."""
size = 1 << depth_higher
root = self.root
return [root(hashes[n: n + size], depth_higher)
for n in range(0, len(hashes), size)]
def branch_and_root_from_level(self, level, leaf_hashes, index,
depth_higher):
"""Return a (merkle branch, merkle_root) pair when a merkle-tree has a
level cached.
To maximally reduce the amount of data hashed in computing a
        merkle branch, cache a tree of depth N at level N // 2.
level is a list of hashes in the middle of the tree (returned
by level())
leaf_hashes are the leaves needed to calculate a partial branch
up to level.
depth_higher is how much higher level is than the leaves of the tree
index is the index in the full list of hashes of the hash whose
merkle branch we want.
"""
if not isinstance(level, list):
raise TypeError("level must be a list")
if not isinstance(leaf_hashes, list):
raise TypeError("leaf_hashes must be a list")
leaf_index = (index >> depth_higher) << depth_higher
leaf_branch, leaf_root = self.branch_and_root(
leaf_hashes, index - leaf_index, depth_higher)
index >>= depth_higher
level_branch, root = self.branch_and_root(level, index)
# Check last so that we know index is in-range
if leaf_root != level[index]:
raise ValueError('leaf hashes inconsistent with level')
return leaf_branch + level_branch, root
class MerkleCache:
"""A cache to calculate merkle branches efficiently."""
def __init__(self, merkle, source_func):
"""Initialise a cache hashes taken from source_func:
async def source_func(index, count):
...
"""
self.merkle = merkle
self.source_func = source_func
self.length = 0
self.depth_higher = 0
self.initialized = Event()
def _segment_length(self):
return 1 << self.depth_higher
def _leaf_start(self, index):
"""Given a level's depth higher and a hash index, return the leaf
index and leaf hash count needed to calculate a merkle branch.
"""
depth_higher = self.depth_higher
return (index >> depth_higher) << depth_higher
def _level(self, hashes):
return self.merkle.level(hashes, self.depth_higher)
async def _extend_to(self, length):
"""Extend the length of the cache if necessary."""
if length <= self.length:
return
# Start from the beginning of any final partial segment.
# Retain the value of depth_higher; in practice this is fine
start = self._leaf_start(self.length)
hashes = await self.source_func(start, length - start)
self.level[start >> self.depth_higher:] = self._level(hashes)
self.length = length
async def _level_for(self, length):
"""Return a (level_length, final_hash) pair for a truncation
of the hashes to the given length."""
if length == self.length:
return self.level
level = self.level[:length >> self.depth_higher]
leaf_start = self._leaf_start(length)
count = min(self._segment_length(), length - leaf_start)
hashes = await self.source_func(leaf_start, count)
level += self._level(hashes)
return level
async def initialize(self, length):
"""Call to initialize the cache to a source of given length."""
self.length = length
self.depth_higher = self.merkle.tree_depth(length) // 2
self.level = self._level(await self.source_func(0, length))
self.initialized.set()
def truncate(self, length):
"""Truncate the cache so it covers no more than length underlying
hashes."""
if not isinstance(length, int):
raise TypeError('length must be an integer')
if length <= 0:
raise ValueError('length must be positive')
if length >= self.length:
return
length = self._leaf_start(length)
self.length = length
self.level[length >> self.depth_higher:] = []
async def branch_and_root(self, length, index):
"""Return a merkle branch and root. Length is the number of
hashes used to calculate the merkle root, index is the position
of the hash to calculate the branch of.
index must be less than length, which must be at least 1."""
if not isinstance(length, int):
raise TypeError('length must be an integer')
if not isinstance(index, int):
raise TypeError('index must be an integer')
if length <= 0:
raise ValueError('length must be positive')
if index >= length:
raise ValueError('index must be less than length')
await self.initialized.wait()
await self._extend_to(length)
leaf_start = self._leaf_start(index)
count = min(self._segment_length(), length - leaf_start)
leaf_hashes = await self.source_func(leaf_start, count)
if length < self._segment_length():
return self.merkle.branch_and_root(leaf_hashes, index)
level = await self._level_for(length)
return self.merkle.branch_and_root_from_level(
level, leaf_hashes, index, self.depth_higher)
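# Minimal usage sketch for the Merkle class (four illustrative leaves; not part of the original module):
if __name__ == '__main__':
    merkle = Merkle()
    leaves = [double_sha256(bytes([i])) for i in range(4)]
    branch, root = merkle.branch_and_root(leaves, 0)
    assert merkle.root_from_proof(leaves[0], branch, 0) == root
    print('proof verified, root:', root.hex())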
|
the-stack_0_7489 | """StorageTableSeeder Seeder."""
from masoniteorm.seeds import Seeder
from app.Storage import Storage
class StorageTableSeeder(Seeder):
def run(self):
"""Run the database seeds."""
Storage.create({
"storage_name": "blank",
"storage_brand": "blank",
"storage_type": "blank",
"storage_size": "blank",
"storage_price": 0,
"storage_img": "blank"
})
# NVMe
Storage.create({
"storage_name": "WD Black SN850",
"storage_brand": "Western Digital",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 164,
"storage_img": "https://i.imgur.com/QJVjs8j.jpg"
})
Storage.create({
"storage_name": "Samsung 970 Evo Plus",
"storage_brand": "Samsung",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 138,
"storage_img": "https://i.imgur.com/lhV4mhF.jpg"
})
Storage.create({
"storage_name": "Sabrent Rocket 4 Plus",
"storage_brand": "Sabrent",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 159,
"storage_img": "https://i.imgur.com/Ax9v8w4.jpg"
})
Storage.create({
"storage_name": "Samsung 980 Pro",
"storage_brand": "Samsung",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 184,
"storage_img": "https://i.imgur.com/HEWuIQF.jpg"
})
Storage.create({
"storage_name": "Crucial P5 Plus",
"storage_brand": "Crucial",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 149,
"storage_img": "https://i.imgur.com/XI7G1bA.jpg"
})
# SATA SSD
Storage.create({
"storage_name": "Samsung 870 EVO",
"storage_brand": "Samsung",
"storage_type": "SATA SSD",
"storage_size": "1TB",
"storage_price": 119,
"storage_img": "https://i.imgur.com/N2CWMLW.jpg"
})
Storage.create({
"storage_name": "Crucial MX500",
"storage_brand": "Crucial",
"storage_type": "SATA SSD",
"storage_size": "1TB",
"storage_price": 99,
"storage_img": "https://i.imgur.com/MUvepKg.jpg"
})
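# To apply this seeder with the Masonite craft CLI (exact command/arguments may vary by version):
#   python craft seed:run StorageTableSeeder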
|
the-stack_0_7490 | import re
from .helpers import fuzzy
class ProcessSpeech:
def __init__(self, pa, localize, command, default_cast):
self.pa = pa
self.command = command
self.localize = localize
self.device = default_cast
self.tv_keys = localize["shows"] + localize["season"]["keywords"] + localize["episode"]["keywords"]
self.music_keys = localize["music"] + localize["artists"] + localize["albums"] + localize["tracks"]
self.process_command()
@property
def results(self):
results = {}
for x in ["media", "device", "season", "episode", "latest", "unwatched", "random", "ondeck", "control", "library"]:
results[x] = getattr(self, x, None)
return results
def process_command(self):
controls = self.localize["controls"]
pre_command = self.command
for control in controls:
ctrl = [controls[control]] if isinstance(controls[control], str) else controls[control]
for c in ctrl:
if self.command.startswith(c):
control_check = self.command.replace(c, "").strip()
if control_check == "":
self.control = control
return
device = fuzzy(control_check, self.pa.device_names)
self.find_replace("separator")
if device[0] in ["watched", "deck", "on watched", "on deck"]:
continue
elif device[1] > 60 and self.command.replace(device[0].lower(), "").strip() == c:
self.device = device[0]
self.control = control
return
self.command = pre_command
self.library = self.get_library()
self.find_replace("play_start")
for item in ["random", "latest", "unwatched", "ondeck"]:
setattr(self, item, self.find_replace(item))
for item in ["season", "episode"]:
if self.find_replace(item, False):
self.library = self.pa.media["shows"]
setattr(self, item, self.get_season_episode_num(self.localize[item]))
self.find_replace(item)
for item in ["artist", "album", "track", "playlist"]:
if self.find_replace(f"music_{item}"):
self.library = self.pa.media[f"{item}s"]
self.get_media_and_device()
def get_library(self):
cmd = self.command
for device in self.pa.device_names:
if device.lower() in cmd:
cmd = cmd.replace(device.lower(), "")
if any(word in cmd for word in self.tv_keys):
return self.pa.media["shows"]
if any(word in cmd for word in self.music_keys):
return self.pa.media["tracks"]
for item in ["movies", "artists", "albums", "tracks", "playlists"]:
if any(word in cmd for word in self.localize[item]):
return self.pa.media[item]
def is_device(self, media_list, separator):
split = self.command.split(separator)
full_score = fuzzy(self.command, media_list)[1]
split_score = fuzzy(self.command.replace(split[-1], "")[0], media_list)[1]
cast_score = fuzzy(split[-1], self.pa.device_names)[1]
return full_score < split_score or full_score < cast_score
def get_media_and_device(self):
media = None
for separator in self.localize["separator"]["keywords"]:
if separator in self.command:
self.find_replace("separator", True, separator)
if self.command.strip().startswith(separator + " "):
self.device = self.command.replace(separator, "").strip()
return
separator = f" {separator} "
if separator in self.command:
for item in ["show", "movie", "artist", "album", "track", "playlist", "all"]:
if item == "all" or self.library == self.pa.media[f"{item}s"]:
self.device = self.is_device(self.pa.media[f"{item}_titles"], separator)
if self.device:
split = self.command.split(separator)
self.command = self.command.replace(separator + split[-1], "")
self.device = split[-1]
self.find_replace("shows")
self.find_replace("movies")
for key in self.music_keys:
if not self.command.replace(key, ""):
self.command = self.command.replace(key, "")
lib = None if not getattr(self, "library", None) else getattr(self, "library")[0]
if self.find_replace("music_separator", False) and getattr(lib, "type", None) in ["artist", "album", "track", None]:
self.media = self.media_by_artist(lib) or self.command
else:
self.media = self.command
def media_by_artist(self, lib):
artist_media = None
for separator in self.localize["music_separator"]["keywords"]:
if separator in self.command:
self.find_replace("music_separator", True, separator)
split = self.command.split(f" {separator} ")
artist = fuzzy(split[-1], self.pa.media["artist_titles"])
if artist[1] > 60:
artist_albums = self.pa.server.search(artist[0], "album")
artist_album_titles = [x.title for x in artist_albums]
artist_tracks = self.pa.server.search(artist[0], "track")
artist_track_tracks = [x.title for x in artist_tracks]
if not lib:
artist_media = fuzzy(split[0], artist_album_titles + artist_track_tracks)
if artist_media[1] > 60:
return next((x for x in artist_albums + artist_tracks if artist_media[0] in getattr(x, "title", "")), None)
elif lib.type == "album":
artist_media = fuzzy(split[0], artist_album_titles)
if artist_media[1] > 60:
return next((x for x in artist_albums if artist_media[0] in getattr(x, "title", "")), None)
elif lib.type == "track":
artist_media = fuzzy(split[0], artist_track_tracks)
if artist_media[1] > 60:
return next((x for x in artist_tracks if artist_media[0] in getattr(x, "title", "")), None)
return self.command
def find_replace(self, item, replace=True, replacement=""):
item = self.localize[item]
if isinstance(item, str):
item = {"keywords": [item]}
elif isinstance(item, list):
item = {"keywords": item}
if all(keyword not in self.command for keyword in item["keywords"]):
return False
if replace:
if replacement:
replacement = f" {replacement} "
for keyword in item["keywords"]:
self.command = f" {self.command} "
for pre in item.get("pre", []):
self.command = self.command.replace(f"{pre} {keyword}", replacement)
for post in item.get("post", []):
self.command = self.command.replace(f"{keyword} {post}", replacement)
if keyword in self.command:
self.command = self.command.replace(f" {keyword} ", replacement)
self.command = self.command.strip()
self.command = " ".join(self.command.split())
return True
def convert_ordinals(self, item):
match = ""
matched = ""
ordinals = self.localize["ordinals"]
for word in item["keywords"]:
for ordinal in ordinals.keys():
if ordinal not in ("pre", "post") and ordinal in self.command:
match_before = re.search(fr"({ordinal})\s*({word})", self.command)
match_after = re.search(fr"({word})\s*({ordinal})", self.command)
if match_before:
match = match_before
matched = match.group(1)
if match_after:
match = match_after
matched = match.group(2)
if match:
replacement = match.group(0).replace(matched, ordinals[matched])
self.command = self.command.replace(match.group(0), replacement)
for pre in ordinals["pre"]:
if f"{pre} {match.group(0)}" in self.command:
self.command = self.command.replace(f"{pre} {match.group(0)}", replacement)
for post in ordinals["post"]:
if f"{match.group(0)} {post}" in self.command:
self.command = self.command.replace(f"{match.group(0)} {post}", replacement)
return self.command.strip()
def get_season_episode_num(self, item):
self.command = self.convert_ordinals(item)
phrase = ""
number = None
for keyword in item["keywords"]:
if keyword in self.command:
phrase = keyword
for pre in item["pre"]:
if pre in self.command:
regex = fr"(\d+\s+)({pre}\s+)({phrase}\s+)"
if re.search(regex, self.command):
self.command = re.sub(regex, fr"{phrase} \1 ", self.command)
else:
self.command = re.sub(
fr"({pre}\s+)({phrase}\s+)(\d+\s+)",
fr"{phrase} \3",
self.command,
)
self.command = re.sub(
fr"({phrase}\s+)(\d+\s+)({pre}\s+)",
fr"{phrase} \2",
self.command,
)
for post in item["post"]:
if post in self.command:
regex = fr"({phrase}\s+)({post}\s+)(\d+\s+)"
if re.search(regex, self.command):
self.command = re.sub(regex, fr"{phrase} \3", self.command)
else:
self.command = re.sub(
fr"(\d+\s+)({phrase}\s+)({post}\s+)",
fr"{phrase} \1",
self.command,
)
self.command = re.sub(
fr"({phrase}\s+)(\d+\s+)({post}\s+)",
fr" {phrase} \2",
self.command,
)
match = re.search(fr"(\d+)\s*({phrase}|^)|({phrase}|^)\s*(\d+)", self.command)
if match:
number = match.group(1) or match.group(4)
self.command = self.command.replace(match.group(0), "").strip()
return number
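# Illustrative shape of the parsed output (keys come from the `results` property above;
# the values depend on the spoken command and on the available Plex data):
# {'media': ..., 'device': ..., 'season': ..., 'episode': ..., 'latest': ..., 'unwatched': ...,
#  'random': ..., 'ondeck': ..., 'control': ..., 'library': ...}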
|
the-stack_0_7491 | import math
import time
import torch
import numpy as np
import pandas as pd
from torch import nn
import editdistance as ed
import soundfile as sf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#PRESERVE_INDICES = len(['<pad>', '<space>'])
PRESERVE_INDICES = len(['<pad>', '<space>', '<eos>'])
#IGNORE_INDICES = [0, 1, 41]
IGNORE_INDICES = [0, 1, 2, 42]
SEP = '\t'
class Timer():
''' Timer for recording training time distribution. '''
def __init__(self):
self.prev_t = time.time()
self.clear()
def set(self):
self.prev_t = time.time()
def cnt(self,mode):
self.time_table[mode] += time.time()-self.prev_t
self.set()
if mode =='bw':
self.click += 1
def show(self):
total_time = sum(self.time_table.values())
self.time_table['avg'] = total_time/self.click
self.time_table['rd'] = 100*self.time_table['rd']/total_time
self.time_table['fw'] = 100*self.time_table['fw']/total_time
self.time_table['bw'] = 100*self.time_table['bw']/total_time
msg = '{avg:.3f} sec/step (rd {rd:.1f}% | fw {fw:.1f}% | bw {bw:.1f}%)'.format(**self.time_table)
self.clear()
return msg
def clear(self):
self.time_table = {'rd':0,'fw':0,'bw':0}
self.click = 0
# Reference : https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/e2e_asr.py#L168
def init_weights(module):
# Exceptions
if type(module) == nn.Embedding:
module.weight.data.normal_(0, 1)
else:
for p in module.parameters():
data = p.data
if data.dim() == 1:
# bias
data.zero_()
elif data.dim() == 2:
# linear weight
n = data.size(1)
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
elif data.dim() in [3,4]:
# conv weight
n = data.size(1)
for k in data.size()[2:]:
n *= k
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
else:
raise NotImplementedError
def init_gate(bias):
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
return bias
def freq_loss(pred, label, sample_rate, n_mels, loss, differential_loss, emphasize_linear_low, p=1):
"""
Args:
pred: model output
label: target
loss: `l1` or `mse`
differential_loss: use differential loss or not, see here `https://arxiv.org/abs/1909.10302`
emphasize_linear_low: emphasize the low-freq. part of linear spectrogram or not
Return:
loss
"""
# ToDo : Tao
# pred -> BxTxD predicted mel-spec or linear-spec
# label-> same shape
# return loss for loss.backward()
if loss == 'l1':
criterion = torch.nn.functional.l1_loss
elif loss == 'mse':
criterion = torch.nn.functional.mse_loss
else:
raise NotImplementedError
cutoff_freq = 3000
# Repeat for postnet
#_, chn, _, dim = pred.shape
dim = pred.shape[-1]
#label = label.unsqueeze(1).repeat(1,chn,1,1)
loss_all = criterion(p * pred, p * label)
if dim != n_mels and emphasize_linear_low:
# Linear
n_priority_freq = int(dim * (cutoff_freq / (sample_rate/2)))
pred_low = pred[:, :, :n_priority_freq]
label_low = label[:, :, :n_priority_freq]
loss_low = criterion(p * pred_low, p * label_low)
#loss_low = torch.nn.functional.mse_loss(p * pred_low, p * label_low)
loss_all = 0.5 * loss_all + 0.5 * loss_low
if dim == n_mels and differential_loss:
pred_diff = pred[:, 1:, :] - pred[:, :-1, :]
label_diff = label[:, 1:, :] - label[:, :-1, :]
loss_all += 0.5 * criterion(p * pred_diff, p * label_diff)
return loss_all
def feat_to_fig(feat):
if feat is None:
return None
# feat TxD tensor
data = _save_canvas(feat.numpy().T)
return torch.FloatTensor(data),"HWC"
def data_to_bar(data, gt_data, tok_size, tick, zero_pad_tok=True):
if len(gt_data) == 0:
return None
# Hack to get discrete bar graph
cnts = [data.count(i)/len(data) for i in range(tok_size)]
gt_cnts = [gt_data.count(i)/len(gt_data) for i in range(tok_size)]
if zero_pad_tok:
cnts[0] = 0
gt_cnts[0] = 0
data = _save_canvas( (cnts,gt_cnts), meta=(range(tok_size),tick))
return torch.FloatTensor(data),"HWC"
def _save_canvas(data, meta=None):
fig, ax = plt.subplots(figsize=(16, 10))
if meta is None:
ax.imshow(data, aspect="auto", origin="lower")
else:
ax.bar(meta[0],data[0],tick_label=meta[1],fc=(0, 0, 1, 0.5))
ax.bar(meta[0],data[1],tick_label=meta[1],fc=(1, 0, 0, 0.5))
fig.canvas.draw()
# Note : torch tb add_image takes color as [0,1]
data = np.array(fig.canvas.renderer._renderer)[:,:,:-1]/255.0
plt.close(fig)
return data
# Reference : https://stackoverflow.com/questions/579310/formatting-long-numbers-as-strings-in-python
def human_format(num):
magnitude = 0
while num >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '{:3}{}'.format(num, [' ', 'K', 'M', 'G', 'T', 'P'][magnitude])
def cal_per(pred, truth):
# Calculate error rate of a batch
if pred is None:
return np.nan
elif len(pred.shape)>=3:
pred = pred.argmax(dim=-1)
er = []
for p,t in zip(pred.cpu(),truth.cpu()):
p = p.tolist()
p = [v for i,v in enumerate(p) if (i==0 or v!=p[i-1]) and v not in IGNORE_INDICES] # Trim repeat
t = [v for v in t.tolist() if v not in IGNORE_INDICES]
er.append(float(ed.eval( p,t))/len(t))
return sum(er)/len(er)
def cal_ppx(prob):
prob = prob.cpu()
prob_len = torch.sum(prob.sum(dim=-1)!=0,dim=-1,keepdim=True).float()
entropy = -torch.sum(prob*(prob+1e-10).log2(),dim=-1) # 2-based log
entropy = torch.mean(entropy.sum(dim=-1)/prob_len)
return torch.pow(torch.FloatTensor([2]),entropy)
# Reference :
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/7e14834dd5e48bb1e6c74581c55684405e821298/transformer/Models.py
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
''' Sinusoid position encoding table '''
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.
return torch.FloatTensor(sinusoid_table)
def get_audio_feat_mask(actual_lengths, n_frames_per_step, dim):
"""
Return:
mask with 1 for padded part and 0 for non-padded part
"""
# padded length = actual lengths + at least 1 frame padded
padded_lengths = actual_lengths + n_frames_per_step-(actual_lengths%n_frames_per_step)
max_len = torch.max(padded_lengths).item()
if max_len % n_frames_per_step != 0:
max_len += n_frames_per_step - max_len % n_frames_per_step
assert max_len % n_frames_per_step == 0
ids = torch.arange(0, max_len).to(actual_lengths.device)
mask = (ids < padded_lengths.unsqueeze(1)).bool()
mask = ~mask
# (D, B, T)
mask = mask.expand(dim, mask.size(0), mask.size(1))
# (B, T, D)
mask = mask.permute(1, 2, 0)
return mask
def get_seq_mask(lens, max_len=None):
''' Mask for given sequence, return shape [B,T,1]'''
batch_size = len(lens)
max_l = lens.max() if max_len is None else max_len
mask = torch.arange(max_l).unsqueeze(0).repeat(batch_size,1).to(lens.device)>lens.unsqueeze(1)
return mask.unsqueeze(-1)
def read_phn_attr(phn_attr_pth, neg_val=0):
df = pd.read_csv(phn_attr_pth, index_col=0, sep=SEP)
attr = df.to_numpy()
attr[attr==0] = neg_val
attr = np.concatenate([np.zeros((PRESERVE_INDICES, attr.shape[1])), attr])
return attr
def get_audio_duration(path):
y, sr = sf.read(path)
return len(y) / sr
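# Minimal usage sketch (illustrative values; not part of the original module):
if __name__ == '__main__':
    pe = get_sinusoid_encoding_table(100, 64)      # (100, 64) sinusoidal position table
    mask = get_seq_mask(torch.LongTensor([3, 5]))  # (2, 5, 1) boolean padding mask
    print(pe.shape, mask.shape, human_format(1234567))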
|
the-stack_0_7492 | """
@author: hugonnet
compile the differences to IceBridge and ICESat into elevation biases, standardized uncertainties, and elevation change biases for all regions and parameters of interest
"""
import os
import pandas as pd
import numpy as np
from pybob.ddem_tools import nmad
from glob import glob
import scipy.interpolate
from sklearn.linear_model import LinearRegression
from pybob.bob_tools import mkdir_p
import pyddem.fit_tools as ft
# dir_valid = '/home/atom/ongoing/work_worldwide/validation/icesat'
# dir_valid_out = '/home/atom/ongoing/work_worldwide/validation/compiled'
dir_valid = '/data/icesat/travail_en_cours/romain/results/valid'
dir_valid_out = '/data/icesat/travail_en_cours/romain/results/valid_compil_stable'
mkdir_p(dir_valid_out)
list_fn_valid = glob(os.path.join(dir_valid,'*.csv'),recursive=True)
print('Found validation file list:')
print(list_fn_valid)
print('Concatenating data...')
df = pd.DataFrame()
for fn_valid in list_fn_valid:
tmp_df = pd.read_csv(fn_valid)
reg = int(os.path.basename(fn_valid).split('_')[2])
if os.path.basename(fn_valid).split('_')[1] == 'ICESat':
sensor = 'ICS'
else:
sensor = 'IB'
tmp_df = tmp_df.assign(reg=reg,sensor=sensor)
df = df.append(tmp_df)
#we want time series minus validation, easier to conceptualize
df.zsc = -df.zsc
df.dh = -df.dh
df.dh_ref = -df.dh_ref
#glacier only
df = df[np.abs(df.dh_ref)<300]
df = df[df.pos==1]
#remove very large outliers
nmad_gla = nmad(df.zsc)
df=df[np.abs(df.zsc-np.nanmedian(df.zsc))<10*nmad_gla]
def bin_valid_df_by_vals(df,bins,bins_val,list_var=['dh','zsc'],ls_dvardt=True,weight_ib=1./40,return_ls=False):
mid_bin, med, std, dvardt, dvardt_2std, ns_ics, ns_ib = ([] for i in range(7))
for i in range(len(bins)-1):
ind = np.logical_and(bins_val >= bins[i],bins_val < bins[i+1])
df_ind = df[ind]
nics = np.count_nonzero(df_ind.sensor == 'ICS')
nib=np.count_nonzero(df_ind.sensor == 'IB')
ns_ics.append(nics)
ns_ib.append(nib)
mid_bin.append(bins[i] + 0.5*(bins[i+1]-bins[i]))
sub_med = []
sub_std = []
sub_dvardt = []
sub_dvardt_2std = []
sub_mu = []
sub_w = []
sub_t = []
for var in list_var:
if weight_ib is not None:
if nics != 0 or nib !=0:
sub_med.append(np.nansum((np.nanmedian(df_ind[df_ind.sensor=='ICS'][var])*nics,np.nanmedian(df_ind[df_ind.sensor=='IB'][var])*nib*weight_ib))/(nics+nib*weight_ib))
sub_std.append(np.nansum((nmad(df_ind[df_ind.sensor == 'ICS'][var]) * nics,nmad(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (nics + nib * weight_ib))
else:
sub_med.append(np.nan)
sub_std.append(np.nan)
else:
sub_med.append(np.nanmedian(df_ind[var]))
sub_std.append(nmad(df_ind[var].values))
if ls_dvardt:
list_t = sorted(list(set(list(df_ind.t.values))))
ftime_delta = np.array(
[(np.datetime64(t) - np.datetime64('{}-01-01'.format(int(2000)))).astype(int) / 365.2422 for t in list_t])
mu = []
w = []
for val_t in list_t:
ind_t = df_ind.t.values == val_t
df_ind_t = df_ind[ind_t]
nics_t = np.count_nonzero(df_ind_t.sensor == 'ICS')
nib_t = np.count_nonzero(df_ind_t.sensor == 'IB')
if np.count_nonzero(ind_t) > 20:
med_t = np.nansum((np.nanmedian(df_ind_t[df_ind_t.sensor=='ICS'][var])*nics_t,np.nanmedian(df_ind_t[df_ind_t.sensor=='IB'][var])*nib_t*weight_ib))/(nics_t+nib_t*weight_ib)
mu.append(med_t)
std_t = np.nansum((nmad(df_ind_t[df_ind_t.sensor == 'ICS'][var]) * nics_t,nmad(df_ind_t[df_ind_t.sensor == 'IB'][var]) * nib_t * weight_ib)) / (nics_t + nib_t * weight_ib)
w.append(std_t/np.sqrt(nics_t+nib_t*weight_ib))
else:
mu.append(np.nan)
w.append(np.nan)
mu = np.array(mu)
w = np.array(w)
if np.count_nonzero(~np.isnan(mu)) > 5:
# reg = LinearRegression().fit(ftime_delta[~np.isnan(mu)].reshape(-1, 1),
# mu[~np.isnan(mu)].reshape(-1, 1))
beta1, _ , incert_slope, _, _ = ft.wls_matrix(ftime_delta[~np.isnan(mu)], mu[~np.isnan(mu)], 1. / w[~np.isnan(mu)]**2,
conf_slope=0.95)
# fig = plt.figure()
# plt.scatter(ftime_delta,mu_dh,color='red')
# plt.plot(np.arange(0,10,0.1),reg.predict(np.arange(0,10,0.1).reshape(-1,1)),color='black',label=reg)
# plt.ylim([-20,20])
# plt.text(5,0,str(reg.coef_[0]))
# plt.legend()
# coef = reg.coef_[0][0]
coef = beta1
sub_dvardt.append(coef)
sub_dvardt_2std.append(incert_slope)
else:
sub_dvardt.append(np.nan)
sub_dvardt_2std.append(np.nan)
sub_mu.append(mu)
sub_w.append(w)
sub_t.append(ftime_delta)
med.append(sub_med)
std.append(sub_std)
dvardt.append(sub_dvardt)
dvardt_2std.append(sub_dvardt_2std)
df_out = pd.DataFrame()
df_out = df_out.assign(mid_bin=mid_bin, ns_ics=ns_ics, ns_ib=ns_ib)
for var in list_var:
df_out['med_' + var] = list(zip(*med))[list_var.index(var)]
df_out['nmad_' + var] = list(zip(*std))[list_var.index(var)]
if ls_dvardt:
df_out['d'+var+'_dt'] = list(zip(*dvardt))[list_var.index(var)]
df_out['d'+var+'_dt_2std'] = list(zip(*dvardt_2std))[list_var.index(var)]
if return_ls and ls_dvardt:
df_ls = pd.DataFrame()
for var in list_var:
# print(len(sub_mu))
df_ls['mu_' + var] = sub_mu[list_var.index(var)]
df_ls['w_' + var] = sub_w[list_var.index(var)]
df_ls['t_' + var] = sub_t[list_var.index(var)]
return df_out, df_ls
else:
return df_out
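# Bin the validation dataframe by calendar month (1-12) and compute the sensor-weighted median and NMAD
# of `var` in each month; used below to fit and remove seasonal biases.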
def bin_valid_df_by_season(df,var='dh',weight_ib=1./40):
date=df.t
season_month_bins = np.arange(1,13,1)
mon = pd.DatetimeIndex(date).month.values
med, std, mid_bin, ns_ics, ns_ib = ([] for i in range(5))
for i in range(len(season_month_bins)):
ind = (mon == season_month_bins[i])
df_ind = df[ind]
nics = np.count_nonzero(df_ind.sensor == 'ICS')
nib = np.count_nonzero(df_ind.sensor == 'IB')
ns_ics.append(nics)
ns_ib.append(nib)
# med.append(np.nanmedian(df_ind[var].values))
# std.append(nmad(df_ind[var].values))
if nics != 0 or nib != 0:
med.append(np.nansum((np.nanmedian(df_ind[df_ind.sensor == 'ICS'][var]) * nics,
np.nanmedian(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (nics + nib * weight_ib))
std.append(np.nansum((nmad(df_ind[df_ind.sensor == 'ICS'][var]) * nics,
nmad(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (
nics + nib * weight_ib))
else:
med.append(np.nan)
std.append(np.nan)
mid_bin.append(season_month_bins[i])
df_out = pd.DataFrame()
df_out = df_out.assign(seas_dec=mid_bin,ns_ics=ns_ics,ns_ib=ns_ib)
df_out['med_'+var]=med
df_out['nmad_'+var]=std
return df_out
#1/ BEFORE SEASONAL CORRECTIONS
print('Deriving statistics without seasonal corrections')
#normalize elevation by region
list_reg = sorted(list(set(list(df.reg))))
for reg in list_reg:
min_elev = np.nanpercentile(df[df.reg == reg].h,95)
max_elev = np.nanpercentile(df[df.reg == reg].h,5)
df.loc[df.reg == reg,'h'] = (df.loc[df.reg == reg,'h'] - min_elev)/(max_elev-min_elev)
ind_0 = np.logical_and(df.reg==reg,df.h<0)
df.loc[ind_0,'h']=np.nan
ind_1 = np.logical_and(df.reg==reg,df.h>1)
df.loc[ind_1,'h']=np.nan
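# Derive binned statistics along several dimensions (time lag, acquisition date, normalized elevation,
# total elevation change, region, dh, z-score, and months split by hemisphere) before any seasonal
# correction is applied.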
bin_dt = [0,60,120,180,240,300,360,540,720,900,1080]
dt = bin_valid_df_by_vals(df, bin_dt, np.abs(df.dt))
dt['type'] = 'dt'
bin_t = [np.datetime64('20'+str(i).zfill(2)+'-01-01') for i in range(21)]
t = bin_valid_df_by_vals(df,bin_t,pd.to_datetime(df.t))
t['type'] = 't'
bin_h = np.arange(0,1.1,0.1)
h = bin_valid_df_by_vals(df,bin_h,df.h)
h['type'] = 'h'
bin_dh_tot = [-150,-100,-50,-35,-15,-10,-5,0,5,10,15]
dh_tot = bin_valid_df_by_vals(df, bin_dh_tot, df.dh_tot)
dh_tot['type'] = 'dh_tot'
bin_reg = np.arange(1, 21)
r = bin_valid_df_by_vals(df, bin_reg, df.reg)
r['type'] = 'reg'
bin_dh = np.arange(-12,13,2)
dh = bin_valid_df_by_vals(df, bin_dh, df.dh)
dh['type'] ='dh'
bin_zsc = np.arange(-3,3.1,0.5)
zsc = bin_valid_df_by_vals(df, bin_zsc, df.zsc)
zsc['type'] ='zsc'
bin_all = [min(df.zsc),max(df.zsc)]
a, a_ls = bin_valid_df_by_vals(df,bin_all,df.zsc,return_ls=True)
a['type'] = 'all'
df_north = df[df.reg <=15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_north.t).month.values
m_n = bin_valid_df_by_vals(df_north,bin_months,months)
m_n['type'] = 'seas_north'
df_south = df[df.reg > 15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_south.t).month.values
m_s = bin_valid_df_by_vals(df_south,bin_months,months)
m_s['type'] = 'seas_south'
df_init = pd.concat([dt,t,h,dh_tot,r,dh,zsc,a,m_n,m_s])
df_init['seas_corr'] = 0
fn_out = os.path.join(dir_valid_out,'valid_ICS_IB_all_bins_all_ls_init.csv')
a_ls.to_csv(fn_out)
#2/ COMPUTE SEASONAL BIASES BY REGION
print('Computing and applying seasonal corrections')
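# For each region, fit a 12-month sinusoid a**2 * sin(2*pi*t/12 + c) + b to the monthly median dh and
# zsc biases, then subtract the fitted seasonal signal from the corresponding rows of the dataframe
# before re-deriving the statistics.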
list_s = []
list_s2 = []
for reg in list(set(list(df.reg))):
df_reg = df[df.reg == reg]
# df_reg = df_reg[df_reg.sensor=='ICS']
s = bin_valid_df_by_season(df_reg)
coefs1, _ = scipy.optimize.curve_fit(lambda t, a, b, c: a ** 2 * np.sin(t * 2 * np.pi / 12 + c) + b, s.seas_dec[~np.isnan(s.med_dh)].values,
s.med_dh[~np.isnan(s.med_dh)].values)
s2 = bin_valid_df_by_season(df_reg,var='zsc')
coefs2, _ = scipy.optimize.curve_fit(lambda t, a, b, c: a ** 2 * np.sin(t * 2 * np.pi / 12 + c) + b, s2.seas_dec[~np.isnan(s2.med_zsc)].values,
s2.med_zsc[~np.isnan(s2.med_zsc)].values)
season_month_bins = np.arange(1, 13, 1)
mon = pd.DatetimeIndex(df.t).month.values
for i in range(len(season_month_bins)):
ind = np.logical_and(mon == season_month_bins[i],df.reg==reg)
df.loc[ind,'dh'] -= coefs1[0] ** 2 * np.sin(season_month_bins[i] * 2 * np.pi / 12 + coefs1[2]) + coefs1[1]
df.loc[ind,'zsc'] -= coefs2[0] ** 2 * np.sin(season_month_bins[i] * 2 * np.pi / 12 + coefs2[2]) + coefs2[1]
s['reg'] = reg
s['var'] = 'dh'
s2['reg']=reg
s2['var']='zsc'
s['amp'] = coefs1[0]**2
s['phase'] = coefs1[2]*12/(2*np.pi) % 12
s['h_shift'] = coefs1[1]
s2['amp_zsc'] = coefs2[0]**2
s2['phase_zsc'] = coefs2[2]*12/(2*np.pi) % 12
s2['h_shift_zsc'] = coefs2[1]
list_s.append(s)
list_s2.append(s2)
#
# df_north = df[df.reg <=15]
# df_south = df[df.reg > 15]
#
# s_n_dh = bin_valid_df_by_season(df_north)
# s_n_dh['hemi'] = 'north'
# s_n_dh['var'] = 'dh'
# s_n_zsc = bin_valid_df_by_season(df_north,var='zsc')
# s_n_zsc['hemi'] = 'north'
# s_n_zsc['var'] = 'zsc'
#
# s_s_dh = bin_valid_df_by_season(df_south)
# s_s_dh['hemi'] = 'south'
# s_s_dh['var'] = 'dh'
# s_s_zsc = bin_valid_df_by_season(df_south,var='zsc')
# s_s_zsc['hemi'] = 'south'
# s_s_zsc['var'] = 'zsc'
#
# s_ns = pd.concat([s_n_dh,s_n_zsc,s_s_dh,s_s_zsc])
# fn_seas_ns = os.path.join(dir_valid_out,'valid_ICS_IB_seas_NS.csv')
# s_ns.to_csv(fn_seas_ns)
df_seas = pd.concat(list_s+list_s2)
fn_seas = os.path.join(dir_valid_out,'valid_ICS_IB_seas_corr_final_weight.csv')
df_seas.to_csv(fn_seas)
#
# #3/ AFTER SEASONAL CORRECTIONS
print('Deriving statistics after seasonal corrections')
bin_dt = [0,60,120,180,240,300,360,540,720,900,1080]
dt = bin_valid_df_by_vals(df, bin_dt, np.abs(df.dt))
dt['type'] = 'dt'
bin_t = [np.datetime64('20'+str(i).zfill(2)+'-01-01') for i in range(21)]
t = bin_valid_df_by_vals(df,bin_t,pd.to_datetime(df.t))
t['type'] = 't'
bin_h = np.arange(0,1.1,0.1)
h = bin_valid_df_by_vals(df,bin_h,df.h)
h['type'] = 'h'
bin_dh_tot = [-150,-100,-50,-35,-15,-10,-5,0,5,10,15]
dh_tot = bin_valid_df_by_vals(df, bin_dh_tot, df.dh_tot)
dh_tot['type'] = 'dh_tot'
bin_reg = np.arange(1, 21)
r = bin_valid_df_by_vals(df, bin_reg, df.reg)
r['type'] = 'reg'
bin_dh = np.arange(-12,13,2)
dh = bin_valid_df_by_vals(df, bin_dh, df.dh)
dh['type'] ='dh'
bin_zsc = np.arange(-3,3.1,0.5)
zsc = bin_valid_df_by_vals(df, bin_zsc, df.zsc)
zsc['type'] ='zsc'
bin_all = [min(df.zsc),max(df.zsc)]
a, a_ls = bin_valid_df_by_vals(df,bin_all,df.zsc,return_ls=True)
a['type'] = 'all'
df_north = df[df.reg <=15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_north.t).month.values
m_n = bin_valid_df_by_vals(df_north,bin_months,months)
m_n['type'] = 'seas_north'
df_south = df[df.reg > 15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_south.t).month.values
m_s = bin_valid_df_by_vals(df_south,bin_months,months)
m_s['type'] = 'seas_south'
df_end = pd.concat([dt,t,h,dh_tot,r,dh,zsc,a,m_n,m_s])
df_end['seas_corr'] = 1
df_out = pd.concat([df_init,df_end])
fn_out = os.path.join(dir_valid_out,'valid_ICS_IB_all_bins_final_weight.csv')
df_out.to_csv(fn_out)
fn_a_ls = os.path.join(dir_valid_out,'valid_ICS_IB_all_bins_final_weight_all_ls.csv')
a_ls.to_csv(fn_a_ls) |
the-stack_0_7494 | """
Default configuration for the TA3N action recognition model (PyTorch Lightning).
"""
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.TO_VALIDATE = False # choices = [True, False]
# -----------------------------------------------------------------------------
# Paths
# -----------------------------------------------------------------------------
_C.PATHS = CN()
_C.PATHS.PATH_DATA_ROOT = "data/" # directory where the feature pickles are stored. Depends on users
_C.PATHS.PATH_LABELS_ROOT = "annotations/" # directory where the annotations are stored. Depends on users
_C.PATHS.PATH_EXP_ROOT="model/action-model/" # directory where the checkpoints are to be stored. Depends on users
_C.PATHS.DATASET_SOURCE="source_train" # depends on users
_C.PATHS.DATASET_TARGET="target_train" # depends on users
# training
_C.PATHS.PATH_DATA_SOURCE=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.DATASET_SOURCE)
_C.PATHS.PATH_DATA_TARGET=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.DATASET_TARGET)
_C.PATHS.TRAIN_SOURCE_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, 'EPIC_100_uda_source_train.pkl') # '/domain_adaptation_source_train_pre-release_v3.pkl'
_C.PATHS.TRAIN_TARGET_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, 'EPIC_100_uda_target_train_timestamps.pkl') # '/domain_adaptation_target_train_pre-release_v6.pkl'
_C.PATHS.VAL_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_target_test_timestamps.pkl")
_C.PATHS.PATH_EXP=os.path.join(_C.PATHS.PATH_EXP_ROOT, "Testexp")
# validation
_C.PATHS.VAL_DATASET_SOURCE="source_val" # depends on users
_C.PATHS.VAL_DATASET_TARGET="target_val" # depends on users
_C.PATHS.PATH_VAL_DATA_SOURCE=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.VAL_DATASET_SOURCE)
_C.PATHS.PATH_VAL_DATA_TARGET=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.VAL_DATASET_TARGET)
_C.PATHS.VAL_SOURCE_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_source_val.pkl")
_C.PATHS.VAL_TARGET_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_target_val.pkl")
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.DATASET = "epic" # dataset choices = [hmdb_ucf, hmdb_ucf_small, ucf_olympic]
_C.DATASET.NUM_CLASSES = "97,300"
_C.DATASET.NUM_SOURCE= 16115 # number of training data (source)
_C.DATASET.NUM_TARGET= 26115 # number of training data (target)
_C.DATASET.MODALITY = "RGB" # choices = [ALL, RGB, Audio, Flow]
_C.DATASET.FRAME_TYPE = "feature" # choices = [frame]
_C.DATASET.NUM_SEGMENTS = 5 # sample frame # of each video for training
_C.DATASET.VAL_SEGMENTS = 5 # sample frame # of each video for validation
_C.DATASET.BASELINE_TYPE = "video" # choices = ['frame', 'tsn']
_C.DATASET.FRAME_AGGREGATION = "trn-m" # method to integrate the frame-level features. choices = [avgpool, trn, trn-m, rnn, temconv]
# ---------------------------------------------------------------------------- #
# Model
# ---------------------------------------------------------------------------- #
_C.MODEL = CN()
_C.MODEL.ADD_FC = 1 # number of shared features
_C.MODEL.FC_DIM = 512 # dimension of shared features
_C.MODEL.ARCH = "TBN" # choices = [resnet50]
_C.MODEL.USE_TARGET = "uSv" # choices = [uSv, Sv, none]
_C.MODEL.SHARE_PARAMS = "Y" # choices = [Y, N]
_C.MODEL.PRED_NORMALIZE = "N" # choices = [Y, N]
_C.MODEL.WEIGHTED_CLASS_LOSS_DA = "N" # choices = [Y, N]
_C.MODEL.WEIGHTED_CLASS_LOSS = "N" # choices = [Y, N]
_C.MODEL.DROPOUT_I = 0.5
_C.MODEL.DROPOUT_V = 0.5
_C.MODEL.NO_PARTIALBN = True
# DA configs
if _C.MODEL.USE_TARGET == "none":
_C.MODEL.EXP_DA_NAME="baseline"
else:
_C.MODEL.EXP_DA_NAME="DA"
_C.MODEL.DIS_DA = "DAN" # choices = [DAN, CORAL, JAN]
_C.MODEL.ADV_POS_0 = "Y" # discriminator for relation features. choices = [Y, N]
_C.MODEL.ADV_DA = "RevGrad" # choices = [None]
_C.MODEL.ADD_LOSS_DA = "attentive_entropy" # choices = [None, target_entropy, attentive_entropy]
_C.MODEL.ENS_DA = None # choices = [None, MCD]
# Attention configs
_C.MODEL.USE_ATTN = "TransAttn" # choices = [None, TransAttn, general]
_C.MODEL.USE_ATTN_FRAME = None # choices = [None, TransAttn, general]
_C.MODEL.USE_BN = None # choices = [None, AdaBN, AutoDIAL]
_C.MODEL.N_ATTN = 1
_C.MODEL.PLACE_DIS = ["Y", "Y", "N"]
_C.MODEL.PLACE_ADV = ["Y", "Y", "Y"]
# ---------------------------------------------------------------------------- #
# Hyperparameters
# ---------------------------------------------------------------------------- #
_C.HYPERPARAMETERS = CN()
_C.HYPERPARAMETERS.ALPHA = 0
_C.HYPERPARAMETERS.BETA = [0.75, 0.75, 0.5]
_C.HYPERPARAMETERS.GAMMA = 0.003 # U->H: 0.003 | H->U: 0.3
_C.HYPERPARAMETERS.MU = 0
# ---------------------------------------------------------------------------- #
# Trainer
# ---------------------------------------------------------------------------- #
_C.TRAINER = CN()
_C.TRAINER.TRAIN_METRIC = "all" # choices = [noun, verb]
_C.TRAINER.FC_DIM = 512 # dimension of shared features
_C.TRAINER.ARCH = "TBN" # choices = [resnet50]
_C.TRAINER.USE_TARGET = "uSv" # choices = [uSv, Sv, none]
_C.TRAINER.SHARE_PARAMS = "Y" # choices = [Y, N]
_C.TRAINER.PRETRAIN_SOURCE = False
_C.TRAINER.VERBOSE = True
_C.TRAINER.DANN_WARMUP = True
# Learning configs
_C.TRAINER.LOSS_TYPE = 'nll'
_C.TRAINER.LR = 0.003
_C.TRAINER.LR_DECAY = 10
_C.TRAINER.LR_ADAPTIVE = None # choices = [None, loss, dann]
_C.TRAINER.LR_STEPS = [10, 20]
_C.TRAINER.MOMENTUM = 0.9
_C.TRAINER.WEIGHT_DECAY = 0.0001
_C.TRAINER.BATCH_SIZE = [128, int(128*_C.DATASET.NUM_TARGET/_C.DATASET.NUM_SOURCE), 128]
_C.TRAINER.OPTIMIZER_NAME = "SGD" # choices = [SGD, Adam]
_C.TRAINER.CLIP_GRADIENT = 20
_C.TRAINER.PRETRAINED = None
_C.TRAINER.RESUME = ""
_C.TRAINER.RESUME_HP = ""
_C.TRAINER.MIN_EPOCHS = 25
_C.TRAINER.MAX_EPOCHS = 30
_C.TRAINER.ACCELERATOR = "ddp"
_C.PATHS.EXP_PATH = os.path.join(_C.PATHS.PATH_EXP + '_' + _C.TRAINER.OPTIMIZER_NAME + '-share_params_' + _C.MODEL.SHARE_PARAMS + '-lr_' + str(_C.TRAINER.LR) + '-bS_' + str(_C.TRAINER.BATCH_SIZE[0]), _C.DATASET.DATASET + '-'+ str(_C.DATASET.NUM_SEGMENTS) + '-alpha_' + str(_C.HYPERPARAMETERS.ALPHA) + '-beta_' + str(_C.HYPERPARAMETERS.BETA[0])+ '_'+ str(_C.HYPERPARAMETERS.BETA[1])+'_'+ str(_C.HYPERPARAMETERS.BETA[2])+"_gamma_" + str(_C.HYPERPARAMETERS.GAMMA) + "_mu_" + str(_C.HYPERPARAMETERS.MU))
# ---------------------------------------------------------------------------- #
# Tester
# ---------------------------------------------------------------------------- #
_C.TESTER = CN()
_C.TESTER.TEST_TARGET_DATA = os.path.join(_C.PATHS.PATH_DATA_ROOT, "target_test")
_C.TESTER.WEIGHTS = os.path.join(_C.PATHS.EXP_PATH , "checkpoint.pth.tar")
_C.TESTER.NOUN_WEIGHTS = None
_C.TESTER.BATCH_SIZE = 512
_C.TESTER.NOUN_TARGET_DATA = None
_C.TESTER.RESULT_JSON = "test.json"
_C.TESTER.TEST_SEGMENTS = 5 # sample frame # of each video for testing
_C.TESTER.SAVE_SCORES = os.path.join(_C.PATHS.EXP_PATH , "scores")
_C.TESTER.SAVE_CONFUSION = os.path.join(_C.PATHS.EXP_PATH , "confusion_matrix")
_C.TESTER.VERBOSE = True
# ---------------------------------------------------------------------------- #
# Miscellaneous configs
# ---------------------------------------------------------------------------- #
_C.MODEL.N_RNN = 1
_C.MODEL.RNN_CELL = "LSTM"
_C.MODEL.N_DIRECTIONS = 1
_C.MODEL.N_TS = 5
_C.MODEL.TENSORBOARD = True
_C.MODEL.FLOW_PREFIX = ""
_C.TRAINER.JOBS = 2
_C.TRAINER.EF = 1
_C.TRAINER.PF = 50
_C.TRAINER.SF = 50
_C.TRAINER.COPY_LIST = ["N", "N"]
_C.TRAINER.SAVE_MODEL = True
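# Return a clone of the defaults so callers can modify the config without mutating the module-level _C.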
def get_cfg_defaults():
return _C.clone() |
the-stack_0_7495 | # coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fit_gmm_pair."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from ott.tools.gaussian_mixture import fit_gmm
from ott.tools.gaussian_mixture import fit_gmm_pair
from ott.tools.gaussian_mixture import gaussian_mixture
from ott.tools.gaussian_mixture import gaussian_mixture_pair
from ott.tools.gaussian_mixture import probabilities
class FitGmmPairTest(parameterized.TestCase):
def setUp(self):
super().setUp()
mean_generator0 = jnp.array([[2., -1.],
[-2., 0.],
[4., 3.]])
cov_generator0 = jnp.array([[[0.2, 0.], [0., 0.1]],
[[0.6, 0.], [0., 0.3]],
[[0.5, 0.4], [0.4, 0.5]]])
weights_generator0 = jnp.array([0.3, 0.3, 0.4])
gmm_generator0 = (
gaussian_mixture.GaussianMixture.from_mean_cov_component_weights(
mean=mean_generator0,
cov=cov_generator0,
component_weights=weights_generator0))
# shift the means to the right by varying amounts
mean_generator1 = mean_generator0 + jnp.array([[1., -0.5],
[-1., -1.],
[-1., 0.]])
cov_generator1 = cov_generator0
weights_generator1 = weights_generator0 + jnp.array([0., 0.1, -0.1])
gmm_generator1 = (
gaussian_mixture.GaussianMixture.from_mean_cov_component_weights(
mean=mean_generator1,
cov=cov_generator1,
component_weights=weights_generator1))
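# Entropic regularization and unbalanced-OT parameters: tau = rho / (rho + epsilon), with tau = 1
# corresponding to the balanced case (see the test parameterization below).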
self.epsilon = 1.e-2
self.rho = 0.1
self.tau = self.rho / (self.rho + self.epsilon)
key = jax.random.PRNGKey(0)
self.key, subkey0, subkey1 = jax.random.split(key, num=3)
self.samples_gmm0 = gmm_generator0.sample(key=subkey0, size=2000)
self.samples_gmm1 = gmm_generator1.sample(key=subkey1, size=2000)
@parameterized.named_parameters(
('balanced_unweighted', True, False),
('balanced_weighted', True, True),
('unbalanced_unweighted', False, False),
('unbalanced_weighted', False, True))
def test_fit_gmm(self, balanced, weighted):
# dumb integration test that makes sure nothing crashes
if balanced:
tau = 1.
else:
tau = self.tau
if weighted:
weights0 = jnp.ones(self.samples_gmm0.shape[0])
weights1 = jnp.ones(self.samples_gmm0.shape[0])
weights_pooled = jnp.concatenate([weights0, weights1], axis=0)
else:
weights0 = None
weights1 = None
weights_pooled = None
# Fit a GMM to the pooled samples
samples = jnp.concatenate([self.samples_gmm0, self.samples_gmm1])
gmm_init = fit_gmm.initialize(
key=self.key,
points=samples,
point_weights=weights_pooled,
n_components=3,
verbose=False)
gmm = fit_gmm.fit_model_em(
gmm=gmm_init,
points=samples,
point_weights=None,
steps=20)
# use the same mixture model for gmm0 and gmm1 initially
pair_init = gaussian_mixture_pair.GaussianMixturePair(
gmm0=gmm, gmm1=gmm, epsilon=self.epsilon, tau=tau)
fit_model_em_fn = fit_gmm_pair.get_fit_model_em_fn(
weight_transport=0.1,
jit=True)
fit_model_em_fn(pair=pair_init,
points0=self.samples_gmm0,
points1=self.samples_gmm1,
point_weights0=weights0,
point_weights1=weights1,
em_steps=1,
m_steps=10,
verbose=False)
if __name__ == '__main__':
absltest.main()
|
the-stack_0_7501 | import coreapi
import maya
import logging
import pydash
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.shortcuts import get_object_or_404
from django.http import QueryDict
from django.db.models.functions import Concat
from django.db.models import TextField
from django.conf import settings
from rest_framework.viewsets import GenericViewSet
from rest_framework import permissions
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, mixins
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from rest_framework_bulk import (
ListBulkCreateUpdateDestroyAPIView,
)
from rest_condition import Or
from talentmap_api.common.common_helpers import in_group_or_403, get_prefetched_filtered_queryset
from talentmap_api.common.permissions import isDjangoGroupMember
from talentmap_api.common.mixins import FieldLimitableSerializerMixin
from talentmap_api.available_positions.models import AvailablePositionFavorite, AvailablePositionDesignation, AvailablePositionRanking, AvailablePositionRankingLock
from talentmap_api.available_positions.serializers.serializers import AvailablePositionDesignationSerializer, AvailablePositionRankingSerializer, AvailablePositionRankingLockSerializer
from talentmap_api.available_positions.filters import AvailablePositionRankingFilter, AvailablePositionRankingLockFilter
from talentmap_api.user_profile.models import UserProfile
from talentmap_api.projected_vacancies.models import ProjectedVacancyFavorite
import talentmap_api.fsbid.services.available_positions as services
import talentmap_api.fsbid.services.projected_vacancies as pvservices
import talentmap_api.fsbid.services.common as comservices
import talentmap_api.fsbid.services.employee as empservices
import talentmap_api.fsbid.services.bid as bidservices
logger = logging.getLogger(__name__)
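# Maximum number of non-archived favorites a user may keep; enforced in AvailablePositionFavoriteActionView.put.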
FAVORITES_LIMIT = settings.FAVORITES_LIMIT
class AvailablePositionsFilter():
declared_filters = [
"exclude_available",
"exclude_projected",
]
use_api = True
class Meta:
fields = "__all__"
class AvailablePositionFavoriteListView(APIView):
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter('page', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, description='A page number within the paginated result set.'),
openapi.Parameter('limit', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, description='Number of results to return per page.')
])
def get(self, request, *args, **kwargs):
"""
get:
Return a list of all of the user's favorite available positions.
"""
user = UserProfile.objects.get(user=self.request.user)
aps = AvailablePositionFavorite.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
limit = request.query_params.get('limit', 15)
page = request.query_params.get('page', 1)
ordering = request.query_params.get('ordering', None)
if len(aps) > 0:
comservices.archive_favorites(aps, request)
pos_nums = ','.join(aps)
return Response(services.get_available_positions(QueryDict(f"id={pos_nums}&limit={limit}&page={page}&ordering={ordering}"),
request.META['HTTP_JWT'],
f"{request.scheme}://{request.get_host()}"))
else:
return Response({"count": 0, "next": None, "previous": None, "results": []})
class AvailablePositionFavoriteIdsListView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs):
"""
get:
Return a list of the ids of the user's favorite available positions.
"""
user = UserProfile.objects.get(user=self.request.user)
aps = AvailablePositionFavorite.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
return Response(aps)
class AvailablePositionRankingView(FieldLimitableSerializerMixin,
GenericViewSet,
ListBulkCreateUpdateDestroyAPIView,
mixins.ListModelMixin,
mixins.RetrieveModelMixin):
permission_classes = [Or(isDjangoGroupMember('ao_user'), isDjangoGroupMember('bureau_user'), isDjangoGroupMember('post_user')), ]
serializer_class = AvailablePositionRankingSerializer
filter_class = AvailablePositionRankingFilter
# For all requests, if the position is locked, then the user must have the appropriate bureau permission for the cp_id
def perform_create(self, serializer):
if isinstance(self.request.data, list):
data = self.request.data
# Empty array
if len(data) == 0:
raise SuspiciousOperation('Array is empty')
cp = data[0].get('cp_id')
# All cp_id values must match the first one
if not all(x.get('cp_id') == data[0].get('cp_id') for x in data):
raise SuspiciousOperation('All cp_id values must be identical')
# if a single object is passed
if isinstance(self.request.data, dict):
cp = self.request.data.get('cp_id')
hasBureauPermissions = empservices.has_bureau_permissions(cp, self.request.META['HTTP_JWT'])
hasOrgPermissions = empservices.has_org_permissions(cp, self.request.META['HTTP_JWT'])
exists = AvailablePositionRankingLock.objects.filter(cp_id=cp).exists()
# is locked and does not have bureau permissions
if exists and not hasBureauPermissions:
raise PermissionDenied()
# not locked and (has org permission or bureau permission)
if not exists and (hasOrgPermissions or hasBureauPermissions):
serializer.save(user=self.request.user.profile)
elif exists and hasBureauPermissions:
serializer.save(user=self.request.user.profile)
else:
raise PermissionDenied()
def get_queryset(self):
cp = self.request.GET.get('cp_id')
hasBureauPermissions = empservices.has_bureau_permissions(cp, self.request.META['HTTP_JWT'])
hasOrgPermissions = empservices.has_org_permissions(cp, self.request.META['HTTP_JWT'])
if hasOrgPermissions or hasBureauPermissions:
return get_prefetched_filtered_queryset(AvailablePositionRanking, self.serializer_class).order_by('rank')
# doesn't have permission
raise PermissionDenied()
def perform_delete(self, request, pk, format=None):
'''
Removes the available position rankings by cp_id for the user
'''
cp = pk
hasBureauPermissions = empservices.has_bureau_permissions(cp, self.request.META['HTTP_JWT'])
hasOrgPermissions = empservices.has_org_permissions(cp, self.request.META['HTTP_JWT'])
exists = AvailablePositionRankingLock.objects.filter(cp_id=cp).exists()
# is locked and does not have bureau permissions
if exists and not hasBureauPermissions:
return Response(status=status.HTTP_403_FORBIDDEN)
# not locked and (has org permission or bureau permission)
elif not exists and (hasOrgPermissions or hasBureauPermissions):
get_prefetched_filtered_queryset(AvailablePositionRanking, self.serializer_class, cp_id=pk).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
elif exists and hasBureauPermissions:
get_prefetched_filtered_queryset(AvailablePositionRanking, self.serializer_class, cp_id=pk).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
# doesn't have permission
return Response(status=status.HTTP_403_FORBIDDEN)
class AvailablePositionRankingLockView(FieldLimitableSerializerMixin,
GenericViewSet,
mixins.ListModelMixin,
mixins.RetrieveModelMixin):
permission_classes = (IsAuthenticated,)
serializer_class = AvailablePositionRankingLockSerializer
filter_class = AvailablePositionRankingLockFilter
def put(self, request, pk, format=None):
# must have bureau permission for the bureau code associated with the position
if not empservices.has_bureau_permissions(pk, request.META['HTTP_JWT']):
return Response(status=status.HTTP_403_FORBIDDEN)
# get the bureau code and org code associated with the position
pos = services.get_available_position(pk, request.META['HTTP_JWT'])
try:
bureau = pos.get('position').get('bureau_code')
org = pos.get('position').get('organization_code')
# return a 404 if we can't determine the bureau/org code
except:
return Response(status=status.HTTP_404_NOT_FOUND)
if pos is None:
return Response(status=status.HTTP_404_NOT_FOUND)
# if the position is already locked, still update the bureau/org codes
if AvailablePositionRankingLock.objects.filter(cp_id=pk).exists():
AvailablePositionRankingLock.objects.filter(cp_id=pk).update(bureau_code=bureau, org_code=org)
return Response(status=status.HTTP_204_NO_CONTENT)
# save the cp_id, bureau code and org code
position, _ = AvailablePositionRankingLock.objects.get_or_create(cp_id=pk, bureau_code=bureau, org_code=org)
position.save()
return Response(status=status.HTTP_204_NO_CONTENT)
def get(self, request, pk, format=None):
'''
Indicates if the available position is locked
Returns 204 if the available position is locked, otherwise, 404
'''
# must have bureau permission for the bureau code associated with the position
if not empservices.has_bureau_permissions(pk, request.META['HTTP_JWT']) and not empservices.has_org_permissions(pk, self.request.META['HTTP_JWT']):
return Response(status=status.HTTP_403_FORBIDDEN)
if AvailablePositionRankingLock.objects.filter(cp_id=pk).exists():
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
def delete(self, request, pk, format=None):
'''
Removes the available position ranking by cp_id
'''
# must have bureau permission for the bureau code associated with the position
if not empservices.has_bureau_permissions(pk, request.META['HTTP_JWT']):
return Response(status=status.HTTP_403_FORBIDDEN)
get_prefetched_filtered_queryset(AvailablePositionRankingLock, self.serializer_class, cp_id=pk).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class FavoritesCSVView(APIView):
permission_classes = (IsAuthenticated,)
filter_class = AvailablePositionsFilter
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter('exclude_available', openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN, description='Whether to exclude available positions'),
openapi.Parameter('exclude_projected', openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN, description='Whether to exclude projected vacancies')
])
def get(self, request, *args, **kwargs):
"""
Return a CSV export of all of the user's favorited available positions and projected vacancies.
"""
user = UserProfile.objects.get(user=self.request.user)
data = []
aps = AvailablePositionFavorite.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
if len(aps) > 0 and request.query_params.get('exclude_available') != 'true':
pos_nums = ','.join(aps)
apdata = services.get_available_positions(QueryDict(f"id={pos_nums}&limit={len(aps)}&page=1"), request.META['HTTP_JWT'])
data = data + apdata.get('results')
pvs = ProjectedVacancyFavorite.objects.filter(user=user, archived=False).values_list("fv_seq_num", flat=True)
if len(pvs) > 0 and request.query_params.get('exclude_projected') != 'true':
pos_nums = ','.join(pvs)
pvdata = pvservices.get_projected_vacancies(QueryDict(f"id={pos_nums}&limit={len(pvs)}&page=1"), request.META['HTTP_JWT'])
data = data + pvdata.get('results')
return comservices.get_ap_and_pv_csv(data, "favorites", True)
class AvailablePositionFavoriteActionView(APIView):
'''
Controls the favorite status of an available position
Responses adapted from Github gist 'stars' https://developer.github.com/v3/gists/#star-a-gist
'''
permission_classes = (IsAuthenticated,)
def get(self, request, pk, format=None):
'''
Indicates if the available position is a favorite
Returns 204 if the available position is a favorite, otherwise, 404
'''
user = UserProfile.objects.get(user=self.request.user)
if AvailablePositionFavorite.objects.filter(user=user, cp_id=pk, archived=False).exists():
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
def put(self, request, pk, format=None):
'''
Marks the available position as a favorite
'''
user = UserProfile.objects.get(user=self.request.user)
aps = AvailablePositionFavorite.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
comservices.archive_favorites(aps, request)
aps_after_archive = AvailablePositionFavorite.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
if len(aps_after_archive) >= FAVORITES_LIMIT:
return Response({"limit": FAVORITES_LIMIT}, status=status.HTTP_507_INSUFFICIENT_STORAGE)
else:
AvailablePositionFavorite.objects.get_or_create(user=user, cp_id=pk)
return Response(status=status.HTTP_204_NO_CONTENT)
def delete(self, request, pk, format=None):
'''
Removes the available position from favorites
'''
user = UserProfile.objects.get(user=self.request.user)
AvailablePositionFavorite.objects.filter(user=user, cp_id=pk).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class AvailablePositionDesignationView(mixins.UpdateModelMixin,
FieldLimitableSerializerMixin,
GenericViewSet):
'''
partial_update:
Updates an available position designation
'''
serializer_class = AvailablePositionDesignationSerializer
permission_classes = (IsAuthenticatedOrReadOnly,)
def get_queryset(self):
queryset = AvailablePositionDesignation.objects.all()
queryset = self.serializer_class.prefetch_model(AvailablePositionDesignation, queryset)
return queryset
def get_object(self):
queryset = self.get_queryset()
pk = self.kwargs.get('pk', None)
obj, _ = queryset.get_or_create(cp_id=pk)
self.check_object_permissions(self.request, obj)
return obj
class AvailablePositionHighlightListView(APIView):
"""
list:
Return a list of all currently highlighted available positions
"""
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, *args, **kwargs):
"""
get:
Return a list of all of the highlighted available positions.
"""
cp_ids = AvailablePositionDesignation.objects.filter(is_highlighted=True).values_list("cp_id", flat=True)
if len(cp_ids) > 0:
pos_nums = ','.join(cp_ids)
return Response(services.get_available_positions(QueryDict(f"id={pos_nums}"), request.META['HTTP_JWT']))
else:
return Response({"count": 0, "next": None, "previous": None, "results": []})
class AvailablePositionHighlightActionView(APIView):
'''
Controls the highlighted status of an available position
'''
permission_classes = (IsAuthenticated,)
def get(self, request, pk, format=None):
'''
Indicates if the position is highlighted
Returns 204 if the position is highlighted, otherwise, 404
'''
position = get_object_or_404(AvailablePositionDesignation, cp_id=pk)
if position.is_highlighted is True:
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
def put(self, request, pk, format=None):
'''
Marks the position as highlighted by the position's bureau
'''
position, _ = AvailablePositionDesignation.objects.get_or_create(cp_id=pk)
in_group_or_403(self.request.user, "superuser")
position.is_highlighted = True
position.save()
return Response(status=status.HTTP_204_NO_CONTENT)
def delete(self, request, pk, format=None):
'''
Removes the position from highlighted positions
'''
position, _ = AvailablePositionDesignation.objects.get_or_create(cp_id=pk)
in_group_or_403(self.request.user, "superuser")
position.is_highlighted = False
position.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class BureauBiddersRankings(APIView):
permission_classes = [Or(isDjangoGroupMember('ao_user'), isDjangoGroupMember('bureau_user'), isDjangoGroupMember('post_user')), ]
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter('id', openapi.IN_PATH, type=openapi.TYPE_STRING, description='perdet of Bureau bidder'),
openapi.Parameter('cp_id', openapi.IN_PATH, type=openapi.TYPE_STRING, description='cp_id of position')
])
def get(self, request, pk, cp_id):
"""
Return position information for all of the bidder's bids, including their ranking information for those positions
"""
user_bids = bidservices.user_bids(pk, request.META['HTTP_JWT'])
user_rankings = AvailablePositionRanking.objects.filter(bidder_perdet=pk).exclude(cp_id=cp_id)
num_sl_bids = 0
filtered_bids = []
for bid in user_bids:
try:
pos_id = str(int(pydash.get(bid, 'position_info.id')))
rank = user_rankings.filter(cp_id=pos_id).values_list("rank", flat=True).first()
if rank is not None:
num_sl_bids += 1
hasBureauPermissions = empservices.has_bureau_permissions(pos_id, self.request.META['HTTP_JWT'])
hasOrgPermissions = empservices.has_org_permissions(pos_id, self.request.META['HTTP_JWT'])
if hasOrgPermissions or hasBureauPermissions:
bid["ranking"] = rank
filtered_bids.append(bid)
except Exception as e:
logger.error(f"{type(e).__name__} at line {e.__traceback__.tb_lineno} of {__file__}: {e}")
filtered_bids.sort(key=lambda x: x['ranking'])
other_sl_bids = num_sl_bids - len(filtered_bids)
return Response({
"results": filtered_bids,
"other-sl-bidcount": 0 if pydash.is_negative(other_sl_bids) else other_sl_bids,
})
|
the-stack_0_7502 | import json
import subprocess
import requests
from shutil import copyfile
from distutils.dir_util import copy_tree
import urllib.request
import urllib
from urllib.parse import urlparse
import os
import sys
import argparse
import time
from socket import error as SocketError
from snakemake.io import expand
workflows=['read_filtering', 'test_files', 'assembly', 'comparison', 'sourmash_db', 'kaiju_db', 'taxonomic_classification', 'functional_inference', 'mtsv_db', 'all'] #keep all at the end of the list
CHOCOPLAN_DIR = "chocophlan_plus_viral"
UNIREF_DIR ="uniref90"
BRACKEN_DIR = "Bracken_Kraken2_DB"
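# Helpers for downloading a shared file from Google Drive: large files require echoing back the
# 'download_warning' confirmation token before the content is streamed.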
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
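# Progress callback for urllib.request.urlretrieve: prints percent complete, size, speed and elapsed time.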
def reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = min(int(count*block_size*100/total_size),100)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
# Certain files need special handling: they must be downloaded and extracted into a specific subdirectory inside the data folder.
def download_extract_targz_file(file_name, install_sub_dir, install_dir, url_string):
if not os.path.isdir(os.path.join(install_dir, install_sub_dir)):
print("\nDownloading " + file_name)
try:
urllib.request.urlretrieve(url_string, install_dir+ "/"+ file_name, reporthook)
mkdir_command = "mkdir " + install_dir + "/" + install_sub_dir
subprocess.run([mkdir_command], shell =True)
gunzip_command = "gunzip " + install_dir + "/" + file_name
subprocess.run([gunzip_command], shell =True)
file_name = file_name.replace('.gz','')
untar_command = "tar -xvf " + install_dir + "/" + file_name + " -C " + install_dir + "/" + install_sub_dir
subprocess.run([untar_command], shell =True)
except SocketError as e:
print("Error downloading/extracting file " + file_name + "Retry script.")
print(e)
try:
os.remove(install_dir+ "/"+file_name)
except OSError:
pass
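# Expand the sourmash SBT tarball pattern over the configured databases and k-mer sizes and download
# any missing archives from the configured sbturl host.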
def download_sourmash_files(data, workflow, install_dir):
tar_file = data[workflow]['sbttar']
db = data[workflow]['databases']
kv = data[workflow]['kvalue']
sbturl = data[workflow]['sbturl']
sourmash_files = expand(tar_file, database=db, kvalue=kv)
for file in sourmash_files:
if not (os.path.isfile(install_dir+"/"+file)):
print("\nDownloading " + file +" from " +sbturl)
try:
urllib.request.urlretrieve("http://" +sbturl + '/' +file, install_dir +"/" +file, reporthook)
except SocketError as e:
print("Error downloading file " + file + "Retry script.")
print(e)
# try:
# os.remove(install_dir+ "/"+file)
# except OSError:
# pass
def download_kmer_files(file_name, install_sub_dir, install_dir, url_string):
if not (os.path.isdir(install_dir + "/" + install_sub_dir)):
mkdir_command = "mkdir " + install_dir + "/" + install_sub_dir
subprocess.run([mkdir_command], shell =True)
urllib.request.urlretrieve(url_string, install_dir+ "/" + install_sub_dir+ "/" +file_name, reporthook)
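# Download/copy every entry listed for `workflow`, dispatching on the URL scheme: http/https/ftp are
# downloaded directly, docker:// pulls a Singularity image, dir:// and file:// copy local paths, and
# gdrive:// fetches from Google Drive. Some archives get the special handling defined above.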
def download_file(workflow, data, install_dir):
if workflow in data.keys():
if (workflow == "post_processing"):
install_dir = "post_processing/"
for file_name, url_string in data[workflow].items():
try:
url = urlparse(url_string)
except Exception as e: # ignore parse failures since some of the JSON values are not URLs
pass
if (file_name == 'sbttar'): #sourmash files from the taxonomic classification workflow.
download_sourmash_files(data, workflow, install_dir)
elif (file_name == 'full_chocophlan_plus_viral.v0.1.1.tar.gz'):
download_extract_targz_file(file_name, CHOCOPLAN_DIR, install_dir, url_string)
elif (file_name == 'uniref90_annotated_1_1.tar.gz'):
download_extract_targz_file(file_name, UNIREF_DIR, install_dir, url_string)
elif (file_name.endswith("kmer_distrib")):
download_kmer_files(file_name, BRACKEN_DIR, install_dir, url_string)
elif (url.scheme == "http" or url.scheme == "https" or url.scheme == "ftp"): #download via http, ftp
if not (os.path.isfile(os.path.join(install_dir, file_name)) ):
print("Downloading " +file_name + " from " + url_string)
try:
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(url_string, install_dir+ "/"+ file_name, reporthook)
if (file_name.endswith('.tgz') or file_name.endswith('.tar.gz')):
untar_command = "tar -zxvf " + install_dir+"/" + file_name + " -C " + install_dir + " && rm -f " + install_dir+"/" + file_name
subprocess.run([untar_command], shell=True)
elif (file_name.endswith('.gz') and not file_name.endswith('fq.gz')):
unzip_command = "gunzip -c " + install_dir+"/" + file_name + " > " + install_dir + "/" + os.path.splitext(file_name)[0] + " && rm -f " + install_dir+"/" + file_name
subprocess.run([unzip_command], shell=True)
except SocketError as e:
print("Error downloading file " + file_name + " Retry script.")
print(e)
try:
os.remove(install_dir+ "/"+file_name)
except OSError:
print("Error unable to delete " + file_name)
elif (url.scheme == 'docker'): #download singularity image
if not (os.path.isfile("../container_images/"+file_name)):
print("Downloading singularity image " +file_name)
sing_command = "singularity pull "+url_string
try:
subprocess.run([sing_command], shell=True)
os.rename(file_name, "../container_images/"+file_name)
except OSError as e:
print("OS Error " + file_name)
print(e)
elif (url.scheme == "dir"): #copy dir
if not (os.path.isdir(os.path.join(install_dir, file_name))):
print("Copying "+ file_name)
try:
copy_tree(".."+ url.path, install_dir+"/"+file_name)
except OSError as e:
print('Directory not copied. Error: ' +str(e))
elif (url.scheme == "file"): #copy file from local location
if not (os.path.isfile(os.path.join(install_dir, file_name))):
print("Copying "+ file_name)
if (file_name.endswith('.tgz')):
untar_command = "tar -zxvf " + ".." + url.path + " -C " + install_dir + " && rm -f " + install_dir+"/" + file_name
try:
subprocess.run([untar_command], shell=True)
except OSError as e:
print('OS Error: ' +str(e))
else:
try:
copyfile(".."+ url.path, install_dir+ "/"+ file_name)
except OSError as e:
print('File not copied. Error: ' +str(e))
elif (url.scheme == "gdrive"):
if not (os.path.isfile(os.path.join(install_dir, file_name))):
print("Downloading "+ file_name)
destination = os.path.join(install_dir, file_name)
try:
download_file_from_google_drive(url.netloc, destination)
except OSError as e:
print("Failed download from GDrive for " + file_name)
def main_func(user_input, install_dir, file_list='config/offline_downloads.json'):
try:
with open(file_list) as f:
data = json.load(f)
except IOError:
print("Error: offline_downloads.json is missing. Exiting")
sys.exit(1)
try:
if not os.path.isdir("data"):
os.mkdir("data")
except IOError:
print("Error: can't create data directory")
if ('all' in user_input):
user_input = workflows[0:-1]
for workflow in user_input:
download_file(workflow, data, install_dir)
else:
for workflow in user_input:
download_file(workflow, data, install_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Script to download data required for offline processing of Dahak software. Requires config/offline_downloads.json")
parser.add_argument("--workflow", nargs='+', help="Download databases/images for inputed workflow", choices=workflows, type=str.lower, required=True)
parser.add_argument("--data_dir", help="directory to copy non image files to", default="data")
args = parser.parse_args()
install_dir = args.data_dir
user_input = args.workflow
main_func(user_input, install_dir)
|
the-stack_0_7506 | """Utilities for real-time data augmentation on image data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
ImageEnhance = None
try:
import scipy
# scipy.linalg cannot be accessed until explicitly imported
from scipy import linalg
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x, tx=tx, ty=ty, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(x, shear=shear, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval)
return x
def apply_channel_shift(x, intensity, channel_axis=0):
"""Performs a channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity,
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_channel_shift(x, intensity_range, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity_range: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
intensity = np.random.uniform(-intensity_range, intensity_range)
return apply_channel_shift(x, intensity, channel_axis=channel_axis)
def apply_brightness_shift(x, brightness):
"""Performs a brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness: Float. The new brightness value.
# Returns
Numpy image tensor.
# Raises
ImportError: if PIL is not available.
"""
if ImageEnhance is None:
raise ImportError('Using brightness shifts requires PIL. '
'Install PIL or Pillow.')
x = array_to_img(x)
imgenhancer_Brightness = ImageEnhance.Brightness(x)
x = imgenhancer_Brightness.enhance(brightness)
x = img_to_array(x)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % (brightness_range,))
u = np.random.uniform(brightness_range[0], brightness_range[1])
return apply_brightness_shift(x, u)
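# Re-centers an affine transform about the image center so that rotations, zooms and shears are applied
# around the middle of the image rather than the top-left corner.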
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0.):
"""Applies an affine transformation specified by the parameters given.
# Arguments
x: 2D numpy array, single image.
theta: Rotation angle in degrees.
tx: Width shift.
ty: Height shift.
shear: Shear angle in degrees.
zx: Zoom in x direction.
zy: Zoom in y direction
row_axis: Index of axis for rows in the input image.
col_axis: Index of axis for columns in the input image.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [scipy.ndimage.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
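# Reverse `x` along the given axis; used to implement horizontal and vertical flips.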
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format='channels_last', scale=True, dtype='float32'):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
dtype: Dtype to use.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=dtype)
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape: %s' % (x.shape,))
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format: %s' % data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 4:
# RGBA
return pil_image.fromarray(x.astype('uint8'), 'RGBA')
elif x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: %s' % (x.shape[2],))
def img_to_array(img, data_format='channels_last', dtype='float32'):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
dtype: Dtype to use for the returned array.
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: %s' % data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=dtype)
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: %s' % (x.shape,))
return x
def save_img(path,
x,
data_format='channels_last',
file_format=None,
scale=True,
**kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):
warnings.warn('The JPG format does not support '
'RGBA images, converting to RGB.')
img = img.convert('RGB')
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
        color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
The desired image format.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
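    # Example
    Illustrative sketch (assumes a file `'sample.png'` exists; requires Pillow):
    ```python
    img = load_img('sample.png', color_mode='rgb', target_size=(224, 224))
    x = img_to_array(img)
    ```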
"""
if grayscale is True:
warnings.warn('grayscale is deprecated. Please use '
'color_mode = "grayscale"')
color_mode = 'grayscale'
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if color_mode == 'grayscale':
if img.mode != 'L':
img = img.convert('L')
elif color_mode == 'rgba':
if img.mode != 'RGBA':
img = img.convert('RGBA')
elif color_mode == 'rgb':
if img.mode != 'RGB':
img = img.convert('RGB')
else:
        raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
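    """Lists the paths of all pictures under `directory` (searched
    recursively) whose extension matches `ext`."""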
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f.lower())]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
brightness_range: Tuple or list of two floats. Range for picking
a brightness shift value from.
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(after applying all other transformations).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
dtype: Dtype to use for the generated arrays.
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
Example of using ```.flow_from_dataframe(dataframe, directory,
x_col, y_col,
has_ext)```:
```python
train_df = pandas.read_csv("./train.csv")
valid_df = pandas.read_csv("./valid.csv")
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='data/train',
x_col="filename",
y_col="class",
has_ext=True,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_dataframe(
dataframe=valid_df,
directory='data/validation',
x_col="filename",
y_col="class",
has_ext=True,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format='channels_last',
validation_split=0.0,
dtype='float32'):
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.dtype = dtype
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % (zoom_range,))
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x,
y=None, batch_size=32, shuffle=True,
sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes data & label arrays, generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, in case
of RGB data, it should have value 3, and in case
of RGBA data, it should have value 4.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def flow_from_dataframe(self, dataframe, directory,
x_col="filename", y_col="class", has_ext=True,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest'):
"""Takes the dataframe and the path to a directory
and generates batches of augmented/normalized data.
# A simple tutorial can be found at: http://bit.ly/keras_flow_from_dataframe
# Arguments
dataframe: Pandas dataframe containing the filenames of the
                images in one column and the classes in another column,
                or column(s) that can be fed as raw target data.
directory: string, path to the target directory that contains all
the images mapped in the dataframe.
x_col: string, column in the dataframe that contains
the filenames of the target images.
            y_col: string or list of strings, column(s) in
                the dataframe that will be the target data.
            has_ext: bool, True if the filenames in dataframe[x_col]
                have filename extensions, else False.
target_size: tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images
found will be resized.
            color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb".
                Whether the images will be converted to have
                1, 3, or 4 color channels.
classes: optional list of classes
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
                inferred from the data in y_col
                (and the order of the classes, which will map to the label
                indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: one of "categorical", "binary", "sparse",
"input", "other" or None. Default: "categorical".
Determines the type of label arrays that are returned:
- `"categorical"` will be 2D one-hot encoded labels,
- `"binary"` will be 1D binary labels,
- `"sparse"` will be 1D integer labels,
- `"input"` will be images identical
to input images (mainly used to work with autoencoders).
- `"other"` will be numpy array of y_col data
- None, no labels are returned (the generator will only
yield batches of image data, which is useful to use
`model.predict_generator()`, `model.evaluate_generator()`, etc.).
batch_size: size of the batches of data (default: 32).
shuffle: whether to shuffle the data (default: True)
seed: optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`, and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed, `"box"` and
`"hamming"` are also supported. By default, `"nearest"` is used.
# Returns
A DataFrameIterator yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DataFrameIterator(dataframe, directory, self,
x_col=x_col, y_col=y_col, has_ext=has_ext,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
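        # Example
        Illustrative sketch (only `rescale` is configured here, so the call
        simply multiplies the batch by 1/255):
        ```python
        import numpy as np
        datagen = ImageDataGenerator(rescale=1. / 255)
        batch = np.random.randint(0, 256, (4, 32, 32, 3)).astype('float32')
        batch = datagen.standardize(batch)
        ```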
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + 1e-6)
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-6)
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def get_random_transform(self, img_shape, seed=None):
"""Generates random parameters for a transformation.
# Arguments
seed: Random seed.
img_shape: Tuple of integers.
Shape of the image that is transformed.
# Returns
A dictionary containing randomly chosen parameters describing the
transformation.
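        # Example
        Illustrative sketch; the returned dictionary can be passed on to
        `apply_transform`:
        ```python
        datagen = ImageDataGenerator(rotation_range=20, horizontal_flip=True)
        params = datagen.get_random_transform((256, 256, 3), seed=42)
        # e.g. params['theta'] is the sampled rotation angle in degrees
        ```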
"""
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
if seed is not None:
np.random.seed(seed)
if self.rotation_range:
theta = np.random.uniform(
-self.rotation_range,
self.rotation_range)
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= img_shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= img_shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(
-self.shear_range,
self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self.vertical_flip
channel_shift_intensity = None
if self.channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self.channel_shift_range,
self.channel_shift_range)
brightness = None
if self.brightness_range is not None:
if len(self.brightness_range) != 2:
raise ValueError(
                    '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % (self.brightness_range,))
brightness = np.random.uniform(self.brightness_range[0],
self.brightness_range[1])
transform_parameters = {'theta': theta,
'tx': tx,
'ty': ty,
'shear': shear,
'zx': zx,
'zy': zy,
'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
return transform_parameters
def apply_transform(self, x, transform_parameters):
"""Applies a transformation to an image according to given parameters.
# Arguments
x: 3D tensor, single image.
transform_parameters: Dictionary with string - parameter pairs
describing the transformation.
Currently, the following parameters
from the dictionary are used:
- `'theta'`: Float. Rotation angle in degrees.
- `'tx'`: Float. Shift in the x direction.
- `'ty'`: Float. Shift in the y direction.
- `'shear'`: Float. Shear angle in degrees.
- `'zx'`: Float. Zoom in the x direction.
- `'zy'`: Float. Zoom in the y direction.
- `'flip_horizontal'`: Boolean. Horizontal flip.
- `'flip_vertical'`: Boolean. Vertical flip.
                - `'channel_shift_intensity'`: Float. Channel shift intensity.
- `'brightness'`: Float. Brightness shift intensity.
# Returns
A transformed version of the input (same shape).
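        # Example
        Illustrative sketch (SciPy is required for the affine part):
        ```python
        import numpy as np
        datagen = ImageDataGenerator()
        x = np.random.random((256, 256, 3))
        x_rotated = datagen.apply_transform(x, {'theta': 30})
        ```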
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
x = apply_affine_transform(x, transform_parameters.get('theta', 0),
transform_parameters.get('tx', 0),
transform_parameters.get('ty', 0),
transform_parameters.get('shear', 0),
transform_parameters.get('zx', 1),
transform_parameters.get('zy', 1),
row_axis=img_row_axis,
col_axis=img_col_axis,
channel_axis=img_channel_axis,
fill_mode=self.fill_mode,
cval=self.cval)
if transform_parameters.get('channel_shift_intensity') is not None:
x = apply_channel_shift(x,
transform_parameters['channel_shift_intensity'],
img_channel_axis)
if transform_parameters.get('flip_horizontal', False):
x = flip_axis(x, img_col_axis)
if transform_parameters.get('flip_vertical', False):
x = flip_axis(x, img_row_axis)
if transform_parameters.get('brightness') is not None:
x = apply_brightness_shift(x, transform_parameters['brightness'])
return x
def random_transform(self, x, seed=None):
"""Applies a random transformation to an image.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
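        # Example
        Illustrative sketch (SciPy required; the seed makes the output
        reproducible):
        ```python
        import numpy as np
        datagen = ImageDataGenerator(rotation_range=40, zoom_range=0.2)
        x = np.random.random((256, 256, 3))
        x_aug = datagen.random_transform(x, seed=1)
        ```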
"""
params = self.get_random_transform(x.shape, seed)
return self.apply_transform(x, params)
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Fits the data generator to some sample data.
This computes the internal data stats related to the
data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, in case
of RGB data, it should have value 3, and in case
of RGBA data, it should have value 4.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
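        # Example
        Illustrative sketch with random data standing in for real images:
        ```python
        import numpy as np
        datagen = ImageDataGenerator(featurewise_center=True,
                                     featurewise_std_normalization=True)
        x_train = np.random.random((100, 32, 32, 3))
        datagen.fit(x_train)
        ```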
"""
x = np.asarray(x, dtype=self.dtype)
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=self.dtype)
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + 1e-6)
if self.zca_whitening:
if scipy is None:
raise ImportError('Using zca_whitening requires SciPy. '
'Install SciPy.')
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = scipy.linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(object):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
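    # Example
    Minimal subclass sketch (the class name and the batches it yields are
    purely illustrative):
    ```python
    import numpy as np
    class RangeIterator(Iterator):
        def _get_batches_of_transformed_samples(self, index_array):
            return np.arange(self.n)[index_array]
    it = RangeIterator(n=10, batch_size=4, shuffle=False, seed=None)
    first_batch = it[0]  # array([0, 1, 2, 3])
    ```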
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def common_init(self, image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation):
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'rgba', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb", "rgba", or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgba':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (4,)
else:
self.image_shape = (4,) + self.target_size
elif self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError(
'Invalid subset name: %s;'
'expected "training" or "validation"' % (subset,))
else:
split = None
self.split = split
self.subset = subset
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
dtype: Dtype to use for the generated arrays.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format='channels_last',
save_to_dir=None, save_prefix='', save_format='png',
subset=None, dtype='float32'):
self.dtype = dtype
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
self.x = np.asarray(x, dtype=self.dtype)
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3, or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=self.dtype)
for i, j in enumerate(index_array):
x = self.x[j]
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(
x.astype(self.dtype), params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links, df=False):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
df: boolean
# Returns
classes: a list of class indices(returns only if `df=False`)
filenames: if `df=False`,returns the path of valid files in `directory`,
relative from `directory`'s parent (e.g., if `directory` is
"dataset/class1", the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
if `df=True`, returns only the filenames that are found inside the
provided directory (e.g., if `directory` is
"dataset/", the filenames will be
`["file1.jpg", "file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
if df:
filenames = []
for root, fname in valid_files:
filenames.append(os.path.basename(fname))
return filenames
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
dtype: Dtype to use for generated arrays.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format='channels_last',
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
dtype='float32'):
super(DirectoryIterator, self).common_init(image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation)
self.directory = directory
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.dtype = dtype
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=self.split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, self.split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=self.dtype)
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
color_mode=self.color_mode,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
# Pillow images should be closed after `load_img`,
# but not PIL images.
if hasattr(img, 'close'):
img.close()
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(x, params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(self.dtype)
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=self.dtype)
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
class DataFrameIterator(Iterator):
"""Iterator capable of reading images from a directory on disk
through a dataframe.
# Arguments
dataframe: Pandas dataframe containing the filenames of the
            images in one column and the classes in another column,
            or column(s) that can be fed as raw target data.
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
            If used with a dataframe, this will be the directory under which
            all the images are present.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
x_col: Column in dataframe that contains all the filenames.
y_col: Column/s in dataframe that has the target data.
        has_ext: bool, Whether the filenames in x_col have extensions or not.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
Color mode to read images.
classes: Optional list of strings, names of
each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`"other"`: targets are the data(numpy array) of y_col data
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, dataframe, directory, image_data_generator,
x_col="filenames", y_col="class", has_ext=True,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
dtype='float32'):
super(DataFrameIterator, self).common_init(image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation)
try:
import pandas as pd
except ImportError:
raise ImportError('Install pandas to use flow_from_dataframe.')
if type(x_col) != str:
raise ValueError("x_col must be a string.")
if type(has_ext) != bool:
raise ValueError("has_ext must be either True if filenames in"
" x_col has extensions,else False.")
self.df = dataframe.drop_duplicates(x_col)
self.df[x_col] = self.df[x_col].astype(str)
self.directory = directory
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', 'other', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
'"other" or None.')
self.class_mode = class_mode
self.dtype = dtype
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
if class_mode not in ["other", "input", None]:
classes = list(self.df[y_col].unique())
else:
if class_mode in ["other", "input", None]:
raise ValueError('classes cannot be set if class_mode'
' is either "other" or "input" or None.')
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
self.samples = _count_valid_files_in_directory(
directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=self.split)
if self.num_classes > 0:
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
else:
print('Found %d images.' % self.samples)
# Second, build an index of the images.
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
filenames = _list_valid_filenames_in_directory(
directory,
white_list_formats,
self.split,
class_indices=self.class_indices,
follow_links=follow_links,
df=True)
if class_mode not in ["other", "input", None]:
if has_ext:
ext_exist = False
for ext in white_list_formats:
if self.df.loc[0, x_col].endswith("." + ext):
ext_exist = True
break
if not ext_exist:
raise ValueError('has_ext is set to True but'
' extension not found in x_col')
temp_df = pd.DataFrame({x_col: filenames}, dtype=str)
temp_df = self.df.merge(temp_df, how='right', on=x_col)
temp_df = temp_df.set_index(x_col)
temp_df = temp_df.reindex(filenames)
classes = temp_df[y_col].values
else:
filenames_without_ext = [f[:-1 * (len(f.split(".")[-1]) + 1)]
for f in filenames]
temp_df = pd.DataFrame({x_col: filenames_without_ext}, dtype=str)
temp_df = self.df.merge(temp_df, how='right', on=x_col)
temp_df = temp_df.set_index(x_col)
temp_df = temp_df.reindex(filenames_without_ext)
classes = temp_df[y_col].values
self.df = temp_df.copy()
self.classes = np.array([self.class_indices[cls] for cls in classes])
elif class_mode == "other":
self.data = self.df[y_col].values
if "object" in list(self.df[y_col].dtypes):
raise TypeError("y_col column/s must be numeric datatypes.")
self.filenames = filenames
super(DataFrameIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=self.dtype)
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
color_mode=self.color_mode,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
# Pillow images should be closed after `load_img`,
# but not PIL images.
if hasattr(img, 'close'):
img.close()
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(x, params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(self.dtype)
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=self.dtype)
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
elif self.class_mode == 'other':
batch_y = self.data[index_array]
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
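# --- Editor's illustration (not part of the original Keras code) ---
# A minimal, self-contained sketch of the 'categorical' branch in
# _get_batches_of_transformed_samples above: integer class indices are turned
# into one-hot rows. The labels and sizes below are made-up examples.
def _one_hot_sketch(class_indices, num_classes, dtype='float32'):
    batch_y = np.zeros((len(class_indices), num_classes), dtype=dtype)
    for i, label in enumerate(class_indices):
        batch_y[i, label] = 1.
    return batch_y
# _one_hot_sketch([0, 2, 1], 3) -> rows [1,0,0], [0,0,1], [0,1,0]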
|
the-stack_0_7507 | import os
import json
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.web import url
from gensim.models.word2vec import Word2Vec
import gensim
import re
import requests
class IndexHandler(tornado.web.RequestHandler):
"""Main handler."""
def get(self, *args, **kwargs):
self.render('index.html')
class W2vhookHandler(tornado.web.RequestHandler):
"""RESTful API handler."""
#def set_default_headers(self):
#print("setting headers!!!")
#self.set_header("Access-Control-Allow-Origin", "*")
#self.set_header("Access-Control-Allow-Headers", "x-requested-with")
#self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
def get(self, *args, **kwargs):
self.render('index.html')
def post(self, *args, **kwargs):
try:
data = json.loads(self.request.body.decode('utf-8'))
except:
print("slack webhook access.")
try:
username = self.get_argument('user_name')
except:
username = data['user_name']
try:
text = self.get_argument('text').replace('w2v:', '').strip().replace(' ', '')
except:
text = data['text'].replace('w2v:', '').strip().replace(' ', '')
pos, neg = self._proc(text)
try:
result = wordvec.most_similar(positive=pos, negative=neg)
except KeyError as err:
result = str(err)
if username == 'webapp':
response = {'text': result}
else:
response = {'text': str(result)}
self.write(json.dumps(response))
@staticmethod
def _proc(var):
div = re.split('([+-+-])', var)
sign = 0
pos = list()
neg = list()
for i in range(len(div)):
if div[i] == '+' or div[i] == '+':
sign = 0
elif div[i] == '-' or div[i] == '-':
sign = 1
else:
pos.append(div[i]) if sign == 0 else neg.append(div[i])
return (pos, neg)
class Application(tornado.web.Application):
"""Application configuration"""
def __init__(self, debug):
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
tornado.web.Application.__init__(self,
[
url(r'/', IndexHandler, name='index'),
url(r'/w2vhook', W2vhookHandler, name='w2vhook'),
],
template_path=os.path.join(BASE_DIR, 'templates'),
static_path=os.path.join(BASE_DIR, 'static'),
debug=debug
)
if __name__ == '__main__':
with open("config.json") as json_data:
config = json.load(json_data)
print("loading word2vec model...")
if config['fasttext']:
wordvec = gensim.models.KeyedVectors.load_word2vec_format(config['model'], binary=False)
else:
model = Word2Vec.load(config['model'])
wordvec = model.wv
print("Word2vec model load complete.")
app = Application(config['debug'])
server = tornado.httpserver.HTTPServer(app)
server.listen(8000)
tornado.ioloop.IOLoop.instance().start()
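# --- Editor's illustration (not executed by the service itself) ---
# Expected behaviour of W2vhookHandler._proc for an analogy-style query; the
# query string is a made-up example. re.split keeps the '+'/'-' separators,
# and words following '-' go to the negative list.
def _demo_proc():
    pos, neg = W2vhookHandler._proc('king+woman-man')
    assert pos == ['king', 'woman']
    assert neg == ['man']
    return pos, neg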
|
the-stack_0_7508 | import os
import pprint
import pymongo
import datetime
import numpy as np
import configparser
from openbox.optimizer import _optimizers
from openbox.utils.start_smbo import create_smbo
from openbox.utils.config_space.space_utils import get_config_space_from_dict
# Read configuration from file.
conf_dir = './conf'
config_path = os.path.join(conf_dir, 'service.conf')
config = configparser.ConfigParser()
config.read(config_path)
name_server = dict(config.items('database'))
host = name_server['database_address']
port = name_server['database_port']
username = name_server['user']
password = name_server['password']
my_url = 'mongodb://' + username + ':' + password + '@%s:%s/' % (host, port)
# Connect to the local MongoDB
myclient = pymongo.MongoClient(my_url)
mydb = myclient[username]
def branin(x):
xs = x.get_dictionary()
x1 = xs['x1']
x2 = xs['x2']
a = 1.
b = 5.1 / (4. * np.pi ** 2)
c = 5. / np.pi
r = 6.
s = 10.
t = 1. / (8. * np.pi)
ret = a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2 + s * (1 - t) * np.cos(x1) + s
return {'objs': (ret,)}
task_config = {
"optimizer": "SMBO",
"parameters": {
"x1": {
"type": "float",
"bound": [-5, 10],
"default": 0
},
"x2": {
"type": "float",
"bound": [0, 15]
},
},
"advisor_type": 'default',
"max_runs": 50,
"surrogate_type": 'gp',
"time_limit_per_trial": 5,
"logging_dir": 'logs',
"task_id": 'hp1'
}
def test_insertion():
# Create table & insert data
user_collection = mydb.user_collection # creating a new table (so easy)
post1 = {'id': 0, 'username': 'example_username',
'email': '[email protected]', 'pwd': 'example_pwd',
'salt': 'example_salt'}
post_id_1 = user_collection.insert_one(post1).inserted_id
item = user_collection.find_one({'username':'example_username'})
pprint.pprint(item)
def test_task_manipulation():
"""
MongoDB command: db.tasks.find()
Returns
-------
"""
# Create table & insert data
task_collection = mydb.tasks
new_task = {'task_name': 'quick_start', 'task_config': task_config}
_ = task_collection.insert_one(new_task).inserted_id
item = task_collection.find_one({'task_name': 'quick_start'})
pprint.pprint(item)
print(type(item))
def test_task_manipulation1():
"""
Show Usage about Runhistory.
Returns
-------
"""
runhistory_collection = mydb.runhistory
optimizer_name = task_config['optimizer']
optimizer_class = _optimizers[optimizer_name]
config_space = get_config_space_from_dict(task_config)
task_config.pop('optimizer', None)
task_config.pop('parameters', None)
task_config.pop('conditions', None)
optimizer = optimizer_class(branin, config_space, **task_config)
for _ in range(10):
config, trial_state, objs, trial_info = optimizer.iterate()
print(config, objs)
new_history = {'task_id': 'abc', 'config': config.get_dictionary(), 'result': list(objs), 'trial_status': trial_state}
id_ = runhistory_collection.insert_one(new_history).inserted_id
print(id_)
item = runhistory_collection.find_one({'task_id': 'abc'})
pprint.pprint(item)
if __name__ == "__main__":
# test_insertion()
# test_task_manipulation()
test_task_manipulation1()
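# --- Editor's illustration (not part of the original test script) ---
# Sanity check of the branin objective defined above at one of its known
# minimisers, using a tiny stand-in for an openbox Configuration object.
# The minimiser (pi, 2.275) and the optimum of about 0.397887 are standard values.
def _demo_branin_minimum():
    class _FakeConfig:
        def get_dictionary(self):
            return {'x1': np.pi, 'x2': 2.275}
    result = branin(_FakeConfig())
    assert abs(result['objs'][0] - 0.397887) < 1e-3
    return result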
|
the-stack_0_7510 | # -*- coding: utf-8 -*-
from furl import furl
from scrapy.spiders import CrawlSpider as BaseSpider, signals
from scrapy_splash import SplashRequest
from scrapy import Request
from gerapy.server.core.utils import str2list, str2dict, str2body
from scrapy.spiders.crawl import Rule as BaseRule
class Rule(BaseRule):
def __init__(self, link_extractor, method='GET', data=None, params=None, headers=None,
callback=None, cb_kwargs=None, follow=None, priority=0, dont_filter=False,
meta=None, proxy=None, render=False, dont_redirect=None, dont_retry=None,
handle_httpstatus_list=None, handle_httpstatus_all=None,
dont_cache=None, dont_obey_robotstxt=None,
download_timeout=None, max_retry_times=None,
process_links=None, process_request=lambda x: x, process_body=None):
self.link_extractor = link_extractor
self.callback = callback
self.method = method
self.data = str2body(data)
self.params = str2dict(params)
self.headers = str2dict(headers)
self.priority = priority
self.dont_filter = dont_filter
self.meta = str2dict(meta)
self.cb_kwargs = str2dict(cb_kwargs)
self.proxy = proxy
self.render = render
self.dont_redirect = dont_redirect
self.dont_retry = dont_retry
self.handle_httpstatus_list = str2list(handle_httpstatus_list, lambda x: int(x))
self.handle_httpstatus_all = handle_httpstatus_all
self.dont_cache = dont_cache
self.dont_obey_robotstxt = dont_obey_robotstxt
self.download_timeout = download_timeout
self.max_retry_times = max_retry_times
self.process_links = process_links
self.process_request = process_request
self.process_body = process_body
if follow is None:
self.follow = False if callback else True
else:
self.follow = follow
def __str__(self):
"""
object to str
:return:
"""
return str(self.__dict__.items())
class CrawlSpider(BaseSpider):
name = None
def start_requests(self):
"""
override start requests
:return:
"""
self.crawler.signals.connect(self.make_start_requests, signal=signals.spider_idle)
return []
def make_start_requests(self):
"""
make start requests
:return:
"""
for request in self.start():
self.crawler.engine.slot.scheduler.enqueue_request(request)
def start(self):
"""
start requests
:return:
"""
for url in self.make_start_urls():
yield Request(url)
def make_start_urls(self):
"""
get start urls
:return:
"""
return self.start_urls
def splash_request(self, request, args=None):
"""
change request to SplashRequest
:param request:
:param args:
:return:
"""
args = args if args else {'wait': 1, 'timeout': 30}
meta = request.meta
meta.update({'url': request.url})
return SplashRequest(url=request.url, dont_process_response=True, args=args, callback=request.callback,
meta=meta)
def _generate_request(self, index, rule, link, response):
"""
generate request by rule
:param index: rule index
:param rule: rule object
:param link: link object
:return: new request object
"""
url = furl(link.url).add(rule.params).url if rule.params else link.url
# init request body
body = None
# process by method
if rule.method.upper() == 'POST':
# if process_body defined, use its result
if callable(rule.process_body):
body = rule.process_body(response)
# if data defined in rule, use data
if rule.data:
body = rule.data
r = Request(url=url, method=rule.method, body=body, headers=rule.headers,
priority=rule.priority,
dont_filter=rule.dont_filter, callback=self._response_downloaded)
# update meta args
r.meta.update(**rule.meta)
meta_items = ['dont_redirect', 'dont_retry', 'handle_httpstatus_list', 'handle_httpstatus_all',
'dont_cache', 'dont_obey_robotstxt', 'download_timeout', 'max_retry_times', 'proxy', 'render']
meta_args = {meta_item: getattr(rule, meta_item) for meta_item in meta_items if
not getattr(rule, meta_item) is None}
# update extra meta args
r.meta.update(**meta_args)
return r
def _requests_to_follow(self, response):
"""
requests to follow
:param response:
:return:
"""
seen = set()
for index, rule in enumerate(self._rules):
links = [lnk for lnk in rule.link_extractor.extract_links(response)
if lnk not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
# change _build_request to _generate_request
r = self._generate_request(index, rule, link, response)
yield rule.process_request(r)
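# --- Editor's illustration (not part of gerapy itself) ---
# How the `params` handling in _generate_request composes a URL with furl;
# the URL and the parameter below are made-up examples.
def _demo_params_url():
    url = furl('http://example.com/list').add({'page': '2'}).url
    # -> 'http://example.com/list?page=2'
    return url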
|
the-stack_0_7511 | import tensorflow as tf
init_val = tf.random_normal(shape=(1, 5), mean=0, stddev=1)
# var = tf.Variable(init_val, name='var')
var = tf.get_variable(name='var', shape=(1, 5), initializer=tf.random_normal_initializer(mean=0, stddev=1))
with tf.variable_scope(name_or_scope='', reuse=True):
"""tf.variable_scope 를 사용하면 변수를 공유할 수 있다."""
var_1 = tf.get_variable(name='var')
print('pre run: \n{}'.format(var))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
post_var, post_var_1 = sess.run([var, var_1])
print('\npost run: \n{}'.format(post_var))
print('\npost run: \n{}'.format(post_var_1)) |
the-stack_0_7512 | # -*- coding: utf-8 -*-
import sys
sys.path.append('.')
from decimal import *
print (sys.argv[1:])
_table = sys.argv[1]
parametro1 = _table.split('.')
_table = parametro1[0]
_prov = int(sys.argv[2])
_dpto = int(sys.argv[3])
_frac = int(sys.argv[4])
_radio = int(sys.argv[5])
# definition of adjacency functions and operations on blocks (manzanas)
def son_adyacentes(este, aquel):
return aquel in adyacentes[este]
# computes the connected component that contains 'este';
# used to find the connected (contiguous) parts that remain after an extraction
def clausura_conexa(este, esos):
    # every 'ese' in 'esos' can be reached from 'este'
    if este not in esos:
        return [] # safe default
    else:
        clausura = [este] # it contains at least 'este'
i = 0
        while i < len(clausura): # i points at the part of the closure not yet expanded
            # i advances one step at a time, expanding the adjacencies one by one,
            # until 'clausura' stops growing,
            # i.e. it reaches a fixed point and is indeed a closure
            adyacentes_i = [ese for ese in adyacentes[clausura[i]] if ese in esos]
            # the neighbours of the i-th element of the closure that are in the collection
            nuevos = [ese for ese in adyacentes_i if ese not in clausura] # not yet added
            clausura.extend(nuevos) # append the adjacencies that were not yet included
i = i + 1
return list(set(clausura))
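# --- Editor's illustration (self-contained, not part of the original script) ---
# The same breadth-first closure idea as clausura_conexa, but with a local
# adjacency dict so it can be run in isolation; the toy graph is a made-up example.
def _closure_sketch(start, items, adjacency):
    if start not in items:
        return []
    closure = [start]
    i = 0
    while i < len(closure):
        for nxt in adjacency.get(closure[i], []):
            if nxt in items and nxt not in closure:
                closure.append(nxt)
        i += 1
    return closure
# _closure_sketch(1, [1, 2, 3, 5], {1: [2], 2: [1, 3], 3: [2], 5: []}) -> [1, 2, 3]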
def conectados(estos):
    # True if the collection is connected, i.e. it has no separate parts
    if not estos: # it is empty
        return True
    else:
        este = estos[0] # any element works; take the first one
return len(clausura_conexa(este, estos)) == len(estos)
# extract a component
def extraer(este, estos):
    # returns the list of connected parts obtained by removing the block from the segment
if este not in estos:
return []
else:
        esos = list(estos) # copy so the original is not modified
esos.remove(este)
partes = []
        while esos: # while it is not empty
            ese = esos[0] # pick any element; use the first one
clausura_de_ese_en_esos = clausura_conexa(ese, esos)
for aquel in clausura_de_ese_en_esos:
                if aquel not in esos: # (?) how could this happen?
# pass
raise Exception("elemento " + str(aquel) + " no está en " + str(esos)
+ "\nclausura_de_ese_en_esos " + str(clausura_de_ese_en_esos))
                else: # so it does not break here
                    esos.remove(aquel) # 'esos' keeps the remainder not connected to 'aquel'
partes.append(clausura_de_ese_en_esos)
return partes
# transfer a component from one set to another
def transferir(este, estos, esos):
    # move 'este' from the source segment to the destination segment;
    # returns a list with 2 elements: the new 'estos' and 'esos'
    if not conectados(esos + [este]): # the transfer cannot be done
        return False
    elif len(estos) == 1: # nothing remains in the source, so source and destination are merged
return [estos + esos]
else:
return extraer(este, estos) + [esos + [este]]
def carga(estos):
conteos = [viviendas[este] for este in estos]
return sum(conteos)
def cuantas_manzanas(estos):
tuplas = [cmpt for cmpt in estos if type(cmpt) is tuple]
mzas = [mza for (mza, lado) in tuplas]
mzas.extend([cmpt for cmpt in estos if type(cmpt) is int])
return len(set(mzas))
def adyacencias_componentes(estos):
#return [este for este in estos]
return [(este, ese) for este in estos for ese in estos if (este, ese) in adyacencias]
def costo_adyacencia(esta):
(este, ese) = esta
if type(este) is int:
este = (este, 0)
if type(ese) is int:
ese = (ese, 0)
costos = [c_a[2] for c_a in costos_adyacencias if (c_a[0], c_a[1]) == (este, ese)]
if costos:
return costos[0]
#################################################################################
#
# definition of the cost function
# and of the quantities related to segment and segmentation quality
#
# case 1
cantidad_de_viviendas_deseada_por_segmento = 20
cantidad_de_viviendas_maxima_deseada_por_segmento = 23
cantidad_de_viviendas_minima_deseada_por_segmento = 17
cantidad_de_viviendas_permitida_para_romper_manzana = 5
multa_fuera_rango_superior = 1e3
multa_fuera_rango_inferior = 1e3
if len(sys.argv) > 7:
cantidad_de_viviendas_minima_deseada_por_segmento = int(sys.argv[6])
cantidad_de_viviendas_maxima_deseada_por_segmento = int(sys.argv[7])
if len(sys.argv) > 8:
cantidad_de_viviendas_deseada_por_segmento = int(sys.argv[8])
if len(sys.argv) > 9:
cantidad_de_viviendas_permitida_para_romper_manzana = int(sys.argv[9])
def costo(segmento):
    # 'segmento' is a list of blocks (manzanas)
carga_segmento = carga(segmento)
mzas_segmento = cuantas_manzanas(segmento)
adyacencias_segmento = adyacencias_componentes(segmento)
costo_adyacencias = sum(costo_adyacencia(ady) for ady in adyacencias_segmento if costo_adyacencia(ady))
if carga_segmento == 0:
return 10000
if carga_segmento > cantidad_de_viviendas_maxima_deseada_por_segmento:
        # the load exceeds the maximum: the cost grows with the cube of the excess
costo = (abs(carga_segmento - cantidad_de_viviendas_maxima_deseada_por_segmento)
*abs(carga_segmento - cantidad_de_viviendas_maxima_deseada_por_segmento)
*abs(carga_segmento - cantidad_de_viviendas_maxima_deseada_por_segmento)
+ (carga_segmento - cantidad_de_viviendas_deseada_por_segmento)
+ multa_fuera_rango_superior)
elif carga_segmento < cantidad_de_viviendas_minima_deseada_por_segmento:
        # the load is below the minimum: the cost grows with the cube of the deficit
costo = (abs(cantidad_de_viviendas_minima_deseada_por_segmento - carga_segmento)
*abs(cantidad_de_viviendas_minima_deseada_por_segmento - carga_segmento)
*abs(cantidad_de_viviendas_minima_deseada_por_segmento - carga_segmento)
+ abs(carga_segmento - cantidad_de_viviendas_deseada_por_segmento)
+ multa_fuera_rango_inferior)
    else: # the load lies within the desired range
        # the cost is the absolute difference from the target value
costo = abs(carga_segmento - cantidad_de_viviendas_deseada_por_segmento)
return costo + 5*mzas_segmento + costo_adyacencias
"""
    # another option: cost relative to the target, quartic above and quadratic below
if carga_segmento > cantidad_de_viviendas_deseada_por_segmento:
return (carga_segmento - cantidad_de_viviendas_deseada_por_segmento)**4
else:
return (cantidad_de_viviendas_deseada_por_segmento - carga_segmento)**2
"""
def seg_id(segmento):
tuplas = [cmpt for cmpt in segmento if type(cmpt) is tuple]
if tuplas:
min_m, min_l = min(tuplas)
else:
min_m = None
ints = [cmpt for cmpt in segmento if type(cmpt) is int]
if ints:
min_mza = min(ints)
else:
min_mza = None
if not min_m:
return (min_mza, 0)
if min_mza and min_mza < min_m:
return (min_mza, 0)
else:
return (min_m, min_l)
def cmpt_id(cmpt):
if type(cmpt) is tuple:
return cmpt
else:
return (cmpt, 0)
#####################################################################################
def costos_segmentos(segmentacion):
    # 'segmentacion' is a list of segments
    return map(costo, segmentacion)
    # the list of costs of the segments
def costo_segmentacion(segmentacion):
    # 'segmentacion' is a list of segments
# cantidad_de_segmentos = len(segmentacion)
# if cantidad_de_segmentos <= 2:
return sum(costos_segmentos(segmentacion))
    # # sums the cost applied to all the segments
# else:
# return sum(costos_segmentos(segmentacion)) + 1e6*cantidad_de_segmentos
# definition of the neighbourhood of a segmentation, used to define and traverse the network of segmentations
# 'vecindario' returns the array of neighbours obtained with extraer and transferir
def vecinos(segmento, segmentacion):
sgm = list(segmento)
vecinos = []
    # extractions
for este in sgm:
sgm2 = list(segmento)
vecinos.append(este)
vecinos.extend(extraer(este, sgm2))
    # transfers
for este in sgm:
for otro in segmentacion:
for ese in otro:
if (este, ese) in adyacencias:
otr = list(otro)
                    # vecinos.extend(extraer(este, list(segmento)))
                    # already added by the extractions above
                    otr.append(este)
                    vecinos.append(otr)
return vecinos
def vecindario(segmentacion):
    # returns the array of neighbouring segmentations
    vecindario = []
    # extractions
for segmento in segmentacion:
sgms = list(segmentacion)
        sgms.remove(segmento) # the rest of the segmentation, not considered here
        if len(segmento) == 2: # a two-element segment is split; the two symmetric cases are the same, so only one is analysed
este = segmento[0]; ese = segmento[1]
vecino = [[este], [ese]] + sgms
vecindario.append(vecino)
elif len(segmento) > 2:
for este in segmento:
vecino = [[este]] + extraer(este, segmento) + sgms
vecindario.append(vecino)
    # transfers
    if len(segmentacion) >= 2: # a transfer is possible
for i, este in enumerate(segmentacion):
            esa = list(segmentacion) # copy to preserve the original
            esa.remove(este) # remove this segment from the copy of the segmentation
            for j, ese in enumerate(esa): # look for another segment
                aquella = list(esa) # copy used to drop 'ese'
                aquella.remove(ese) # copy of the segmentation without 'este' or 'ese'
if len(este) == 1 and len(ese) == 1 and i < j:
                    pass # skip, so pairs are not repeated when 'este' and 'ese' are swapped
else:
for cada in este:
transferencia = transferir(cada, este, ese)
                        if transferencia: # the transfer succeeded
vecino = transferencia + aquella
# print ('transferí', cada, este, ese)
vecindario.append(vecino)
    # fusion of 2 segments, avoiding repetitions
    # (when either one has a single element the fusion is already covered by the transfers)
if len(este) > 1 and len(ese) > 1 and conectados(este + ese):
vecino = [este + ese] + aquella
#print ('transferí', cada, este, ese)
                    vecindario.append(vecino) # consider fusions
return vecindario
# may return repetitions
#
# optimization
#
# end of definitions
import psycopg2
import operator
import time
import os
import DAO
dao = DAO.DAO()
#dao.db('segmentador:rodatnemges:censo2020:172.26.67.239')
conexion = [
os.environ.get('MANDARINA_USER', 'alpe'),
os.environ.get('MANDARINA_PASS', 'alpe'),
os.environ.get('MANDARINA_DATABASE', 'CPHyV2020'),
os.environ.get('MANDARINA_HOST', 'localhost'),
os.environ.get('MANDARINA_PORT', '5432')
]
#conexion = ["censo2020", "segmentador", "rodatnemges", "172.26.67.239", "5432"]
if len(sys.argv) > 10:
conn_str = sys.argv[10]
else:
conn_str = ':'.join(conexion)
dao.db(conn_str)
radios = dao.get_radios(_table)
for prov, dpto, frac, radio in radios:
    if (radio and prov == _prov and dpto == _dpto and frac == _frac and radio == _radio): # only the radio selected on the command line
        print()
print ("radio: ")
print (prov, dpto, frac, radio)
conteos_mzas = dao.get_conteos_mzas(_table, prov, dpto, frac, radio)
manzanas = [mza for mza, conteo in conteos_mzas]
conteos = dao.get_conteos_lados(_table, prov, dpto, frac, radio)
conteos_lados = [((mza, lado), conteo) for mza, lado, conteo in conteos]
lados = [(mza, lado) for mza, lado, conteo in conteos]
costos_adyacencias = [((mza, lado), (mza_ady, lado_ady), costo) for mza, lado, mza_ady, lado_ady, costo
in dao.get_costos_adyacencias(_table, prov, dpto, frac, radio)]
adyacencias_mzas_mzas = dao.get_adyacencias_mzas_mzas(_table, prov, dpto, frac, radio)
adyacencias_mzas_lados = [(mza, (mza_ady, lado_ady)) for mza, mza_ady, lado_ady
in dao.get_adyacencias_mzas_lados(_table, prov, dpto, frac, radio)]
adyacencias_lados_mzas= [((mza, lado), mza_ady) for mza, lado, mza_ady
in dao.get_adyacencias_lados_mzas(_table, prov, dpto, frac, radio)]
lados_enfrentados = [((mza, lado), (mza_ady, lado_ady)) for mza, lado, mza_ady, lado_ady
in dao.get_adyacencias_lados_enfrentados(_table, prov, dpto, frac, radio)]
mzas_enfrente = [((mza, lado), (mza_ady, lado_ady)) for mza, lado, mza_ady, lado_ady
in dao.get_adyacencias_mzas_enfrente(_table, prov, dpto, frac, radio)]
lados_contiguos = [((mza, lado), (mza_ady, lado_ady)) for mza, lado, mza_ady, lado_ady
in dao.get_adyacencias_lados_contiguos(_table, prov, dpto, frac, radio)]
conteos = conteos_mzas
adyacencias = adyacencias_mzas_mzas
dao.close()
print ('Se parten en lados las manzanas con mas de ',
cantidad_de_viviendas_permitida_para_romper_manzana,
' viviendas',)
conteos_excedidos = [(manzana, conteo) for (manzana, conteo) in conteos_mzas
if conteo > cantidad_de_viviendas_permitida_para_romper_manzana]
mzas_excedidas = [mza for mza, conteo in conteos_excedidos]
lados_excedidos = [(mza, lado) for ((mza, lado), conteo) in conteos_lados
if conteo > cantidad_de_viviendas_maxima_deseada_por_segmento]
print ('manzanas a partir:', mzas_excedidas)
print ('lados excedidos:', lados_excedidos)
componentes = [mza for mza in manzanas if mza not in mzas_excedidas]
conteos = [(mza, conteo) for (mza, conteo) in conteos if mza not in mzas_excedidas]
adyacencias = [(mza, mza_ady) for (mza, mza_ady) in adyacencias
if mza not in mzas_excedidas and mza_ady not in mzas_excedidas]
        # the oversized blocks are removed
componentes.extend([(mza, lado) for (mza, lado) in lados if mza in mzas_excedidas])
conteos.extend([((mza, lado), conteo) for ((mza, lado), conteo) in conteos_lados
if mza in mzas_excedidas])
adyacencias.extend([((mza, lado), mza_ady) for (mza, lado), mza_ady in adyacencias_lados_mzas
if mza in mzas_excedidas and mza_ady not in mzas_excedidas])
adyacencias.extend([(mza, (mza_ady, lado_ady))
for mza, (mza_ady, lado_ady) in adyacencias_mzas_lados
if mza not in mzas_excedidas and mza_ady in mzas_excedidas])
adyacencias.extend([((mza, lado), (mza_ady, lado_ady))
for (mza, lado), (mza_ady, lado_ady) in lados_enfrentados
if mza in mzas_excedidas and mza_ady in mzas_excedidas])
adyacencias.extend([((mza, lado), (mza_ady, lado_ady))
for (mza, lado), (mza_ady, lado_ady) in lados_contiguos])
        ########################## add this only if mza has no other adjacency
adyacencias.extend([(mza, mza_ady)
for (mza, lado), (mza_ady, lado_ady) in mzas_enfrente
if mza not in mzas_excedidas and mza_ady not in mzas_excedidas
and (mza, mza_ady) not in adyacencias])
###########################################################################################
        # the sides corresponding to those blocks are added
#
# adyacencias.extend((ese, este) for (este, ese) in adyacencias)
# adyacencias = list(set(adyacencias))
# print (adyacencias)
if len(sys.argv) > 11 and sys.argv[11] == 'filtrar':
adyacencias = [(este, ese) for (este, ese) in adyacencias if este not in lados_excedidos and ese not in lados_excedidos]
componentes = list(set(componentes) - set(lados_excedidos))
            # drops sides with more than the desired count so the other algorithm can handle them
        #---- up to here
if adyacencias:
start = time.time()
            # build the dictionaries
componentes_en_adyacencias = list(set([cpte for cpte, cpte_ady in adyacencias]).union(set([cpte for cpte_ady, cpte in adyacencias])))
todos_los_componentes = list(set(componentes + componentes_en_adyacencias))
viviendas = dict()
for cpte in componentes:
viviendas[cpte] = 0
for cpte, conteo in conteos:
viviendas[cpte] = int(conteo)
componentes_no_en_adyacencias = list(set(todos_los_componentes) - set(componentes_en_adyacencias))
print ("no están en cobertura", componentes_no_en_adyacencias)
            # their adjacency list has to be left empty
adyacentes = dict()
for cpte in todos_los_componentes:
adyacentes[cpte] = list([])
for cpte, adyacente in adyacencias:
adyacentes[cpte] = adyacentes[cpte] + [adyacente]
adyacentes[adyacente] = adyacentes[adyacente] + [cpte]
            # optimization
            ##############################
            # initial solutions
soluciones_iniciales = []
            # starting from one end of the network of segmentations: a single segment equal to the whole radio
todos_juntos = [componentes]
soluciones_iniciales.append(todos_juntos)
            # starting from the other end of the network of segmentations: one segment per block
            # TODO: try one segment per side
todos_separados = [[cpte] for cpte in componentes]
soluciones_iniciales.append(todos_separados)
##############################
            # TODO: load the segment from the previous segmentation sgm into segmentacio.conteos for the side-based case
costo_minimo = float('inf')
for solucion in soluciones_iniciales:
                # greedy algorithm
vecinos = list(vecindario(solucion))
costo_actual = costo_segmentacion(solucion)
# costos_vecinos = map(costo_segmentacion, vecinos)
costos_vecinos = [costo_segmentacion(vecino) for vecino in vecinos]
if not costos_vecinos:
print ('Costos vecinos vacios')
else:
                    while min(costos_vecinos) < costo_actual: # an improvement is still possible
min_id, mejor_costo = min(enumerate(costos_vecinos), key=operator.itemgetter(1))
solucion = vecinos[min_id] # greedy
# print (mejor_costo)
vecinos = list(vecindario(solucion))
costo_actual = mejor_costo
# costos_vecinos = map(costo_segmentacion, vecinos)
costos_vecinos = [costo_segmentacion(vecino) for vecino in vecinos]
if costo_actual < costo_minimo:
costo_minimo = costo_actual
mejor_solucion = solucion
            # show warnings
if componentes_no_en_adyacencias:
print ("Cuidado: ")
print
print ("no están en adyacencias, cobertura con errores, quizás?", componentes_no_en_adyacencias)
print ("no se les asignó componentes adyacentes y quedaron aisladas")
print
            # show the solution
mejor_solucion.sort(key = seg_id)
print ("---------")
print ("mínimo local")
print ("costo", costo_minimo)
for s, segmento in enumerate(mejor_solucion):
segmento.sort(key = cmpt_id)
print (["segmento", s+1,
"carga", carga(segmento),
"costo", costo(segmento),
"componentes", segmento,
"cuantas_manzanas", cuantas_manzanas(segmento)
])
print ("deseada: %d, máxima: %d, mínima: %d" % (cantidad_de_viviendas_deseada_por_segmento,
cantidad_de_viviendas_maxima_deseada_por_segmento,
cantidad_de_viviendas_minima_deseada_por_segmento))
end = time.time()
print (str(end - start) + " segundos")
            # update the segment values in the polygons table for graphical display
segmentos = {}
for s, segmento in enumerate(mejor_solucion):
for cpte in segmento:
segmentos[cpte] = s + 1
            # for now only Junin de los Andes; look up the table using a prov, dpto - agglomerate relation
            #------
            # update _table = shapes.eAAAAa (using sides)
#------
dao.reopen()
for cpte in componentes:
dao.set_componente_segmento(_table, prov, dpto, frac, radio, cpte, segmentos[cpte])
else:
print ("sin adyacencias")
    # record this run
dao.close()
dao.reopen()
import os
pwd = os.path.dirname(os.path.realpath(__file__))
import socket
host = socket.gethostname()
import getpass
user = getpass.getuser()
user_host = user + '@' + host
comando = " ".join(sys.argv[:])
import datetime
dao.set_corrida(comando, user_host, '', prov, dpto, frac, radio, datetime.datetime.now())
dao.close()
|
the-stack_0_7516 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2019-2020 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Routines to compute the trace of the evolution matrix along a trajectory.
"""
import numpy as np
from numba import njit, prange, generated_jit, types
@generated_jit(nopython=True, fastmath=True)
def _compute_single_trajectory_trace(
l: np.ndarray,
c_phi: np.ndarray,
s_phi: np.ndarray,
theta_alpha: float,
theta_beta1: float,
theta_beta3: float,
B_magnitude: float,
B_angle: float,
) -> float:
"""Find the trace of the matrix R_tot^2 in the presence of an in-plane Zeeman field.
Parameters
----------
l: np.ndarray
(n_scat) array, length of each segment
c_phi: np.ndarray
        Cosine of the angle of the trajectory on each segment.
    s_phi: np.ndarray
        Sine of the angle of the trajectory on each segment.
theta_alpha: float
Rashba SOI induced rotation per unit length
theta_beta3: float
Cubic Dresselhaus induced rotation per unit length
theta_beta1: float
Linear Dresselhaus induced rotation per unit length
B_magnitude : float
Rotation induced by the Zeeman field in magnitude by unit length
B_angle : float
Rotation induced by the Zeeman field in angle by unit radian
Returns
-----------
trace: float
The trace of the matrix R_tot^2
"""
if l.dtype == types.float32:
fcast = np.float32
ccast = np.complex64
inner_cdtype = np.dtype("complex64")
else:
fcast = np.float64
ccast = np.complex128
inner_cdtype = np.dtype("complex128")
def _inner(
l: np.ndarray,
c_phi: np.ndarray,
s_phi: np.ndarray,
theta_alpha: float,
theta_beta1: float,
theta_beta3: float,
B_magnitude: float,
B_angle: float,
):
# Convert Zeeman to Cartesian
B_x = B_magnitude*np.cos(B_angle)
B_y = B_magnitude*np.sin(B_angle)
# Computation for the clockwise trajectory
rotations = np.empty((len(l), 2, 2), dtype=inner_cdtype)
# Necessary cast to avoid upcasting to 64 bits
theta_alpha = fcast(theta_alpha)
theta_beta1 = fcast(theta_beta1)
theta_beta3 = fcast(theta_beta3)
B_x = fcast(B_x)
B_y = fcast(B_y)
B_x_cw = theta_alpha * s_phi + theta_beta3 * c_phi * s_phi**2 + theta_beta1 * c_phi + B_x
B_y_cw = -theta_alpha * c_phi - theta_beta3 * s_phi * c_phi**2 - theta_beta1 * s_phi + B_y
B_cw = np.sqrt(B_x_cw ** 2 + B_y_cw ** 2)
theta_cw = B_cw * l
# Necessary cast to avoid upcasting to 64 bits
c_theta_cw = np.cos(fcast(0.5) * theta_cw)
s_theta_cw = np.sin(fcast(0.5) * theta_cw)
psi1_cw = np.empty(len(l), dtype=inner_cdtype)
psi2_cw = np.empty(len(l), dtype=inner_cdtype)
for i, (b, bx, by) in enumerate(zip(B_cw, B_x_cw, B_y_cw)):
if b != 0:
# Necessary cast to avoid upcasting to 128 bits
psi1_cw[i] = -ccast(1j) * (bx / b - ccast(1j) * by / b)
psi2_cw[i] = -ccast(1j) * (bx / b + ccast(1j) * by / b)
else:
psi1_cw[i] = psi2_cw[i] = 0
rotations[:, 0, 0] = c_theta_cw
rotations[:, 0, 1] = psi1_cw * s_theta_cw
rotations[:, 1, 0] = psi2_cw * s_theta_cw
rotations[:, 1, 1] = c_theta_cw
# For 2x2 matrices calling BLAS matrix multiplication has a large overhead
# and the need to allocate the output matrix is likely to cause issue with
# parallelization of the code.
cw_rot = np.array([[1, 0], [0, 1]], dtype=inner_cdtype)
for i in range(0, len(l)):
# equivalent to cw_rot = r @ cw_rot
r = rotations[i]
a = r[0, 0] * cw_rot[0, 0] + r[0, 1] * cw_rot[1, 0]
b = r[0, 0] * cw_rot[0, 1] + r[0, 1] * cw_rot[1, 1]
c = r[1, 0] * cw_rot[0, 0] + r[1, 1] * cw_rot[1, 0]
d = r[1, 0] * cw_rot[0, 1] + r[1, 1] * cw_rot[1, 1]
cw_rot[0, 0] = a
cw_rot[0, 1] = b
cw_rot[1, 0] = c
cw_rot[1, 1] = d
# Computation for the counter clock wise trajectory
if B_x != 0.0 or B_y != 0.0:
B_x_ccw = -theta_alpha * s_phi - theta_beta3 * c_phi * s_phi**2 - theta_beta1 * c_phi + B_x
B_y_ccw = theta_alpha * c_phi + theta_beta3 * s_phi * c_phi**2 + theta_beta1 * s_phi + B_y
B_ccw = np.sqrt(B_x_ccw ** 2 + B_y_ccw ** 2)
theta_ccw = B_ccw * l
# Necessary cast to avoid upcasting to 64 bits
c_theta_ccw = np.cos(fcast(0.5) * theta_ccw)
s_theta_ccw = np.sin(fcast(0.5) * theta_ccw)
psi1_ccw = np.empty(len(l), dtype=inner_cdtype)
psi2_ccw = np.empty(len(l), dtype=inner_cdtype)
for i, (b, bx, by) in enumerate(zip(B_ccw, B_x_ccw, B_y_ccw)):
if b != 0:
# Necessary cast to avoid upcasting to 128 bits
psi1_ccw[i] = -ccast(1j) * (bx / b - ccast(1j) * by / b)
psi2_ccw[i] = -ccast(1j) * (bx / b + ccast(1j) * by / b)
else:
psi1_ccw[i] = psi2_ccw[i] = 0
rotations[:, 0, 0] = c_theta_ccw
rotations[:, 0, 1] = psi1_ccw * s_theta_ccw
rotations[:, 1, 0] = psi2_ccw * s_theta_ccw
rotations[:, 1, 1] = c_theta_ccw
ccw_rot = np.array([[1, 0], [0, 1]], dtype=inner_cdtype)
for i in range(len(l) - 1, -1, -1):
# equivalent to ccw_rot = r @ ccw_rot
r = rotations[i]
a = r[0, 0] * ccw_rot[0, 0] + r[0, 1] * ccw_rot[1, 0]
b = r[0, 0] * ccw_rot[0, 1] + r[0, 1] * ccw_rot[1, 1]
c = r[1, 0] * ccw_rot[0, 0] + r[1, 1] * ccw_rot[1, 0]
d = r[1, 0] * ccw_rot[0, 1] + r[1, 1] * ccw_rot[1, 1]
ccw_rot[0, 0] = a
ccw_rot[0, 1] = b
ccw_rot[1, 0] = c
ccw_rot[1, 1] = d
return (
ccw_rot[0, 0].conjugate() * cw_rot[0, 0]
+ ccw_rot[1, 0].conjugate() * cw_rot[1, 0]
+ ccw_rot[0, 1].conjugate() * cw_rot[0, 1]
+ ccw_rot[1, 1].conjugate() * cw_rot[1, 1]
).real
else:
return (
cw_rot[0, 0] * cw_rot[0, 0]
+ cw_rot[0, 1] * cw_rot[1, 0]
+ cw_rot[1, 0] * cw_rot[0, 1]
+ cw_rot[1, 1] * cw_rot[1, 1]
).real
return _inner # type: ignore
@njit(fastmath=True, parallel=True)
def compute_trajectory_traces(
index,
l,
c_phi,
s_phi,
theta_alpha,
theta_beta1,
theta_beta3,
B_magnitude,
B_angle,
):
"""Compute the trace of the evolution operator for different trajectories.
This is run in parallel in batches of 1000.
Parameters
----------
index : np.ndarray
(n_scat, 2) array, with the beginning and end index for each trajectory
l : np.ndarray
(n_scat) array, length of each segment
c_phi : np.ndarray
        Cosine of the angle of the trajectory on each segment.
    s_phi : np.ndarray
        Sine of the angle of the trajectory on each segment.
theta_alpha : float
Rashba SOI induced rotation per unit length
theta_beta3 : float
Cubic Dresselhaus induced rotation per unit length
theta_beta1 : float
Linear Dresselhaus induced rotation per unit length
B_magnitude : float
Rotation induced by the Zeeman field in magnitude by unit length
B_angle : float
Rotation induced by the Zeeman field in angle by unit radian
Returns
-------
traces : np.ndarray
1D array of the trace of each trajectory.
"""
N_orbit = np.shape(index)[0]
T = np.empty(N_orbit)
    for n in prange(-(-N_orbit // 1000)):  # ceil division so a trailing partial batch of trajectories is not skipped
r = N_orbit % 1000 if n * 1000 + 999 >= N_orbit else 1000
for i in range(r):
traj_id = n * 1000 + i
begin, end = index[traj_id]
T_a = _compute_single_trajectory_trace(
l[begin:end],
c_phi[begin:end],
s_phi[begin:end],
theta_alpha,
theta_beta1,
theta_beta3,
B_magnitude,
B_angle,
)
T[traj_id] = T_a
return T
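# --- Editor's usage sketch (not part of the module) ---
# Calling the batched trace computation on a single two-segment trajectory;
# every number below is a made-up example chosen only to exercise the code path
# without a Zeeman field.
def _demo_traces():
    index = np.array([[0, 2]])            # one trajectory covering segments 0..1
    l = np.array([1.0, 2.0])              # segment lengths
    phi = np.array([0.0, np.pi / 2])      # heading of each segment
    return compute_trajectory_traces(
        index, l, np.cos(phi), np.sin(phi),
        0.1, 0.0, 0.05,                   # theta_alpha, theta_beta1, theta_beta3
        0.0, 0.0,                         # B_magnitude, B_angle
    )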
|
the-stack_0_7517 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# import urllib
import urllib.request as req
import numpy as np
import tensorflow as tf
# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
def main():
# If the training and test sets aren't stored locally, download them.
if not os.path.exists(IRIS_TRAINING):
# raw = urllib.urlopen(IRIS_TRAINING_URL).read()
with req.urlopen(IRIS_TRAINING_URL) as f:
raw = f.read()
print(raw)
raw = raw.decode('utf-8')
with open(IRIS_TRAINING, "w") as f:
f.write(raw)
else:
print("iris_training.csv already exists")
if not os.path.exists(IRIS_TEST):
# raw = urllib.urlopen(IRIS_TEST_URL).read()
with req.urlopen(IRIS_TEST_URL) as f:
raw = f.read()
print(raw)
raw = raw.decode('utf-8')
with open(IRIS_TEST, "w") as f:
f.write(raw)
else:
print("iris_test.scv already exists")
# Load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TRAINING,
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float32)
# Specify that all features have real-value data
feature_columns = [tf.feature_column.numeric_column("x", shape=[4])]
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir="./tmp/iris_model")
# Define the training inputs
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(training_set.data)},
y=np.array(training_set.target),
num_epochs=None,
shuffle=True)
# Train model.
classifier.train(input_fn=train_input_fn, steps=2000)
# Define the test inputs
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(test_set.data)},
y=np.array(test_set.target),
num_epochs=1,
shuffle=False)
# Evaluate accuracy.
accuracy_score = classifier.evaluate(input_fn=test_input_fn)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
# Classify two new flower samples.
new_samples = np.array(
[[6.4, 3.2, 4.5, 1.5],
[5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": new_samples},
num_epochs=1,
shuffle=False)
predictions = list(classifier.predict(input_fn=predict_input_fn))
predicted_classes = [p["classes"] for p in predictions]
print(
"New Samples, Class Predictions: {}\n"
.format(predicted_classes))
if __name__ == "__main__":
main()
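# --- Editor's post-processing sketch (not part of the original tutorial) ---
# The "classes" entries returned by DNNClassifier.predict are assumed here to be
# arrays of byte strings such as [b'1']; a common follow-up step is converting
# them to plain integers. This assumes the `predictions` list built in main().
def _classes_to_ints(predictions):
    return [int(p["classes"][0]) for p in predictions]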
|
the-stack_0_7518 | # -*- coding: utf-8 -*-
# @Author: theo-l
# @Date: 2017-09-08 11:52:55
# @Last Modified by: theo-l
# @Last Modified time: 2017-09-12 15:53:48
import six
import warnings
from fabric.api import run, settings
DEBUG = True
class HostConfig:
def __init__(self, host_config=None, *args, **kwargs):
"""
        host_config: a list of tuples describing each host: (host_string, password, host_name, host_role)
"""
warnings.warn("Deprecated fabric base, use common.fabrics.HostConfig instead!")
self._config = host_config or []
self.all_host_strings = set()
self.all_host_names = set()
self.all_host_roles = set()
self.host_string_password_map = {}
self.host_string_name_map = {}
self.host_string_role_map = {}
for host in self._config:
self.all_host_strings.add(host[0])
self.all_host_names.add(host[2])
self.all_host_roles.add(host[3])
self.host_string_password_map[host[0]] = host[1]
self.host_string_name_map[host[0]] = host[2]
self.host_string_role_map[host[0]] = host[3]
if DEBUG:
print(self.all_host_names)
print(self.all_host_strings)
print(self.all_host_roles)
print(self.host_string_password_map)
print(self.host_string_name_map)
print(self.host_string_role_map)
self.name_host_strings_map = self._build_role_or_name_host_string_map(reverse_data=self.host_string_name_map)
self.role_host_string_map = self._build_role_or_name_host_string_map(reverse_data=self.host_string_role_map)
if DEBUG:
print(self.name_host_strings_map)
print(self.role_host_string_map)
setattr(self, 'ALL_HOSTS', self.all_host_strings)
for name in self.all_host_names:
setattr(self, '{}_HOSTS'.format(name.upper()), self.name_host_strings_map[name])
setattr(self, 'ALL_ROLES', self.all_host_strings)
for role in self.all_host_roles:
setattr(self, '{}_ROLES'.format(role.upper()), self.role_host_string_map[role])
def setup_fabric_env(self, env):
"""
Use the attribute value to init fabric's env instance, including:
1. hosts
2. passwords
3. roles
"""
env.hosts = self.all_host_strings
env.passwords = self.host_string_password_map
env.roledefs.update(self.role_host_string_map)
@staticmethod
def _build_role_or_name_host_string_map(reverse_data=None):
"""
Build a map data structure: name/role to a list of host strings:
Example:
TEST:[host1, host2, host3,...]
PROD:[host1, host2, host3,...]
"""
if reverse_data is None:
return {}
data = {}
        for host_string, role_or_name in reverse_data.items():
data.setdefault(role_or_name, [])
data[role_or_name].append(host_string)
return data
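# --- Editor's usage sketch (host strings and passwords below are made-up) ---
# HostConfig expects tuples of the form (host_string, password, host_name,
# host_role); setup_fabric_env then fills fabric's env accordingly.
def _demo_host_config(env):
    cfg = HostConfig([('deploy@10.0.0.1:22', 'secret', 'web1', 'test')])
    cfg.setup_fabric_env(env)   # sets env.hosts, env.passwords, env.roledefs
    return cfg.TEST_ROLES       # -> ['deploy@10.0.0.1:22']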
class BaseRequirement(object):
def __init__(self, requirements=None, install_cmd=None, uninstall_cmd=None, update_cmd=None,
install_prompts=None, uninstall_prompts=None, update_prompts=None, **kwargs):
self._requirements = requirements or []
self.requirement_handlers = {
'install': install_cmd,
'uninstall': uninstall_cmd,
'update': update_cmd
}
self.requirements_prompts = {
'install': install_prompts or {},
'uninstall': uninstall_prompts or {},
'update': update_prompts or {}
}
def handle_requirements(self, action, name=None):
"""
Handle requirement depends on the action
"""
if action and not isinstance(action, six.string_types):
raise TypeError('action: {} should be a string type value!'.format(action))
if action not in self.requirement_handlers:
raise ValueError('Unknow action: {}!'.format(action))
if name and not isinstance(name, six.string_types):
raise ValueError('name should be a string type value!')
handler = self.requirement_handlers[action]
requirements = [name] if name else self._requirements
with settings(prompts=self.requirements_prompts[action]):
for name in requirements:
run('{} {}'.format(handler, name))
def install_requirements(self, name=None):
"""
Install the specified requirement with name or install all requirement
"""
self.handle_requirements('install', name)
def update_requirements(self, name=None):
self.handle_requirements('update', name)
def uninstall_requirements(self, name=None):
self.handle_requirements('uninstall', name)
class DebRequirement(BaseRequirement):
"""
    Requirement tools to install/uninstall packages on a Debian-based OS system
"""
def __init__(self, requirements=None, **kwargs):
kwargs['uninstall_prompts'] = {'Do you want to continue? [Y/n] ': 'Y'}
super(DebRequirement, self).__init__(requirements=requirements,
install_cmd='sudo apt-get install',
uninstall_cmd='sudo apt-get remove',
**kwargs)
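# --- Editor's usage sketch (package names are made-up examples) ---
# How the requirement helpers above are meant to be driven from a fabfile task;
# each call shells out through fabric's run() on the current host.
def _demo_deb_requirements():
    deb = DebRequirement(requirements=['git', 'nginx'])
    deb.install_requirements()             # sudo apt-get install git / nginx
    deb.uninstall_requirements('nginx')    # sudo apt-get remove nginx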
|
the-stack_0_7519 | # Copyright 2016 Johns Hopkins University (Dan Povey)
# 2016 Vijayaditya Peddinti
# Apache 2.0.
""" This module contains the parent class from which all layers are inherited
and some basic layer definitions.
"""
from __future__ import print_function
import math
import re
import sys
import libs.nnet3.xconfig.utils as xutils
class XconfigLayerBase(object):
""" A base-class for classes representing layers of xconfig files.
"""
def __init__(self, first_token, key_to_value, all_layers):
"""
        first_token: first token on the xconfig line, e.g. 'affine-layer'.
key_to_value: dictionary with parameter values
{ 'name':'affine1',
'input':'Append(0, 1, 2, ReplaceIndex(ivector, t, 0))',
              'dim':'1024' }.
The only required and 'special' values that are dealt with directly
at this level, are 'name' and 'input'. The rest are put in
self.config and are dealt with by the child classes' init functions.
all_layers: An array of objects inheriting XconfigLayerBase for all
previously parsed layers.
"""
self.layer_type = first_token
if not 'name' in key_to_value:
raise RuntimeError("Expected 'name' to be specified.")
self.name = key_to_value['name']
if not xutils.is_valid_line_name(self.name):
raise RuntimeError("Invalid value: name={0}".format(
key_to_value['name']))
for prev_layer in all_layers:
if self.name == prev_layer.name:
raise RuntimeError("Name '{0}' is used for more than one "
"layer.".format(self.name))
# the following, which should be overridden in the child class, sets
# default config parameters in self.config.
self.set_default_configs()
# The following is not to be reimplemented in child classes;
# it sets the config values to those specified by the user, and
# parses any Descriptors.
self.set_configs(key_to_value, all_layers)
# This method, sets the derived default config values
# i.e., config values when not specified can be derived from
# other values. It can be overridden in the child class.
self.set_derived_configs()
# the following, which should be overridden in the child class, checks
# that the config parameters that have been set are reasonable.
self.check_configs()
def set_configs(self, key_to_value, all_layers):
""" Sets the config variables.
We broke this code out of __init__ for clarity.
the child-class constructor will deal with the configuration values
in a more specific way.
"""
# First check that there are no keys that don't correspond to any config
# parameter of this layer, and if so, raise an exception with an
# informative message saying what configs are allowed.
for key,value in key_to_value.items():
if key != 'name':
if not key in self.config:
configs = ' '.join([ ('{0}->"{1}"'.format(x,y) if isinstance(y, str)
else '{0}->{1}'.format(x,y))
for x,y in self.config.items() ])
raise RuntimeError("Configuration value {0}={1} was not "
"expected in layer of type {2}; allowed "
"configs with their defaults: {3}"
.format(key, value, self.layer_type, configs))
for key,value in key_to_value.items():
if key != 'name':
assert key in self.config # we checked above.
self.config[key] = xutils.convert_value_to_type(key,
type(self.config[key]),
value)
self.descriptors = dict()
self.descriptor_dims = dict()
# Parse Descriptors and get their dims and their 'final' string form.
# in self.descriptors[key]
for key in self.get_input_descriptor_names():
if not key in self.config:
raise RuntimeError("{0}: object of type {1} needs to override"
" get_input_descriptor_names()."
"".format(sys.argv[0], str(type(self))))
descriptor_string = self.config[key] # input string.
assert isinstance(descriptor_string, str)
desc = self.convert_to_descriptor(descriptor_string, all_layers)
desc_dim = self.get_dim_for_descriptor(desc, all_layers)
desc_norm_str = desc.str()
# desc_output_str contains the "final" component names, those that
# appear in the actual config file (i.e. not names like
# 'layer.auxiliary_output'); that's how it differs from desc_norm_str.
# Note: it's possible that the two strings might be the same in
# many, even most, cases-- it depends whether
# output_name(self, auxiliary_output)
# returns self.get_name() + '.' + auxiliary_output
# when auxiliary_output is not None.
# That's up to the designer of the layer type.
desc_output_str = self.get_string_for_descriptor(desc, all_layers)
self.descriptors[key] = {'string':desc,
'normalized-string':desc_norm_str,
'final-string':desc_output_str,
'dim':desc_dim}
# the following helps to check the code by parsing it again.
desc2 = self.convert_to_descriptor(desc_norm_str, all_layers)
desc_norm_str2 = desc2.str()
# if the following ever fails we'll have to do some debugging.
if desc_norm_str != desc_norm_str2:
raise RuntimeError("Likely code error: '{0}' != '{1}'"
"".format(desc_norm_str, desc_norm_str2))
def str(self):
"""Converts 'this' to a string which could be printed to
an xconfig file; in xconfig_to_configs.py we actually expand all the
lines to strings and write it as xconfig.expanded as a reference
(so users can see any defaults).
"""
list_of_entries = [ '{0} name={1}'.format(self.layer_type, self.name) ]
for key, value in sorted(self.config.items()):
if isinstance(value, str) and re.search('=', value):
# the value is a string that contains an '=' sign, so we need to
# enclose it in double-quotes, otherwise we woudldn't be able to
# parse from that output.
if re.search('"', value):
print("Warning: config '{0}={1}' contains both double-quotes "
"and equals sign; it will not be possible to parse it "
"from the file.".format(key, value), file=sys.stderr)
list_of_entries.append('{0}="{1}"'.format(key, value))
else:
list_of_entries.append('{0}={1}'.format(key, value))
return ' '.join(list_of_entries)
def __str__(self):
return self.str()
def normalize_descriptors(self):
"""Converts any config variables in self.config which correspond to
Descriptors, into a 'normalized form' derived from parsing them as
Descriptors, replacing things like [-1] with the actual layer names,
and regenerating them as strings. We stored this when the object was
initialized, in self.descriptors; this function just copies them back
to the config.
"""
for key, desc_str_dict in self.descriptors.items():
self.config[key] = desc_str_dict['normalized-string']
def convert_to_descriptor(self, descriptor_string, all_layers):
"""Convenience function intended to be called from child classes,
converts a string representing a descriptor ('descriptor_string')
into an object of type Descriptor, and returns it. It needs 'self' and
'all_layers' (where 'all_layers' is a list of objects of type
XconfigLayerBase) so that it can work out a list of the names of other
layers, and get dimensions from them.
"""
prev_names = xutils.get_prev_names(all_layers, self)
tokens = xutils.tokenize_descriptor(descriptor_string, prev_names)
pos = 0
(descriptor, pos) = xutils.parse_new_descriptor(tokens, pos, prev_names)
# note: 'pos' should point to the 'end of string' marker
# that terminates 'tokens'.
if pos != len(tokens) - 1:
raise RuntimeError("Parsing Descriptor, saw junk at end: " +
' '.join(tokens[pos:-1]))
return descriptor
def get_dim_for_descriptor(self, descriptor, all_layers):
"""Returns the dimension of a Descriptor object. This is a convenience
function used in set_configs.
"""
layer_to_dim_func = \
lambda name: xutils.get_dim_from_layer_name(all_layers, self,
name)
return descriptor.dim(layer_to_dim_func)
def get_string_for_descriptor(self, descriptor, all_layers):
"""Returns the 'final' string form of a Descriptor object,
as could be used in config files. This is a convenience function
provided for use in child classes;
"""
layer_to_string_func = \
lambda name: xutils.get_string_from_layer_name(all_layers,
self, name)
return descriptor.config_string(layer_to_string_func)
def get_name(self):
"""Returns the name of this layer, e.g. 'affine1'. It does not
necessarily correspond to a component name.
"""
return self.name
###### Functions that might be overridden by the child class: #####
def set_default_configs(self):
"""Child classes should override this.
"""
raise Exception("Child classes must override set_default_configs().")
def set_derived_configs(self):
"""This is expected to be called after set_configs and before
check_configs().
"""
if self.config['dim'] <= 0:
self.config['dim'] = self.descriptors['input']['dim']
def check_configs(self):
"""child classes should override this.
"""
pass
def get_input_descriptor_names(self):
"""This function, which may be (but usually will not have to be)
overridden by child classes, returns a list of names of the input
descriptors expected by this component. Typically this would just
return ['input'] as most layers just have one 'input'. However some
layers might require more inputs (e.g. cell state of previous LSTM layer
in Highway LSTMs). It is used in the function 'normalize_descriptors()'.
This implementation will work for layer types whose only
Descriptor-valued config is 'input'.
If a child class adds more inputs, or does not have an input
(e.g. the XconfigInputLayer), it should override this function's
implementation to something like: `return ['input', 'input2']`
"""
return [ 'input' ]
def auxiliary_outputs(self):
"""Returns a list of all auxiliary outputs that this layer supports.
These are either 'None' for the regular output, or a string
(e.g. 'projection' or 'memory_cell') for any auxiliary outputs that
the layer might provide. Most layer types will not need to override
this.
"""
return [ None ]
def output_name(self, auxiliary_output = None):
"""Called with auxiliary_output == None, this returns the component-node
name of the principal output of the layer (or if you prefer, the text
form of a descriptor that gives you such an output; such as
Append(some_node, some_other_node)).
The 'auxiliary_output' argument is a text value that is designed for
extensions to layers that have additional auxiliary outputs.
For example, to implement a highway LSTM you need the memory-cell of a
layer, so you might allow auxiliary_output='memory_cell' for such a
layer type, and it would return the component node or a suitable
Descriptor: something like 'lstm3.c_t'
"""
raise Exception("Child classes must override output_name()")
def output_dim(self, auxiliary_output = None):
"""The dimension that this layer outputs. The 'auxiliary_output'
parameter is for layer types which support auxiliary outputs.
"""
raise Exception("Child classes must override output_dim()")
def get_full_config(self):
"""This function returns lines destined for the 'full' config format, as
would be read by the C++ programs. Since the program
xconfig_to_configs.py writes several config files, this function returns
a list of pairs of the form (config_file_basename, line),
e.g. something like
[ ('init', 'input-node name=input dim=40'),
('ref', 'input-node name=input dim=40') ]
which would be written to config_dir/init.config and config_dir/ref.config.
"""
raise Exception("Child classes must override get_full_config()")
class XconfigInputLayer(XconfigLayerBase):
"""This class is for lines like
'input name=input dim=40'
or
'input name=ivector dim=100'
in the config file.
"""
def __init__(self, first_token, key_to_value, prev_names = None):
assert first_token == 'input'
XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)
def set_default_configs(self):
self.config = { 'dim': -1}
def check_configs(self):
if self.config['dim'] <= 0:
raise RuntimeError("Dimension of input-layer '{0}'"
"should be positive.".format(self.name))
def get_input_descriptor_names(self):
return [] # there is no 'input' field in self.config.
def output_name(self, auxiliary_outputs = None):
# there are no auxiliary outputs as this layer will just pass the input
assert auxiliary_outputs is None
return self.name
def output_dim(self, auxiliary_outputs = None):
# there are no auxiliary outputs as this layer will just pass the input
assert auxiliary_outputs is None
return self.config['dim']
def get_full_config(self):
# unlike other layers the input layers need to be printed in
# 'init.config' (which initializes the neural network prior to the LDA)
ans = []
for config_name in [ 'init', 'ref', 'final' ]:
ans.append( (config_name,
'input-node name={0} dim={1}'.format(self.name,
self.config['dim'])))
return ans
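# --- Editor's illustration (not part of the Kaldi sources) ---
# What an 'input' xconfig line expands to; the 40-dimensional input is a
# made-up example and assumes xutils accepts these values unchanged.
def _demo_input_layer():
    layer = XconfigInputLayer('input', {'name': 'input', 'dim': '40'}, [])
    return layer.get_full_config()
    # -> [('init', 'input-node name=input dim=40'),
    #     ('ref', 'input-node name=input dim=40'),
    #     ('final', 'input-node name=input dim=40')]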
class XconfigTrivialOutputLayer(XconfigLayerBase):
"""This class is for lines like
'output name=output input=Append(input@-1, input@0, input@1, ReplaceIndex(ivector, t, 0))'
This is for outputs that are not really output "layers"
(there is no affine transform or nonlinearity), they just directly map to an
output-node in nnet3.
"""
def __init__(self, first_token, key_to_value, prev_names = None):
assert first_token == 'output'
XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)
def set_default_configs(self):
# note: self.config['input'] is a descriptor, '[-1]' means output
# the most recent layer.
self.config = { 'input':'[-1]' }
def check_configs(self):
pass # nothing to check; descriptor-parsing can't happen in this function.
def output_name(self, auxiliary_outputs = None):
# there are no auxiliary outputs as this layer will just pass the output
# of the previous layer
assert auxiliary_outputs is None
return self.name
def output_dim(self, auxiliary_outputs = None):
assert auxiliary_outputs is None
# note: each value of self.descriptors is (descriptor, dim, normalized-string, output-string).
return self.descriptors['input']['dim']
def get_full_config(self):
# the input layers need to be printed in 'init.config' (which
# initializes the neural network prior to the LDA), in 'ref.config',
# which is a version of the config file used for getting left and right
# context (it doesn't read anything for the LDA-like transform and/or
# presoftmax-prior-scale components)
# In 'full.config' we write everything, this is just for reference,
# and also for cases where we don't use the LDA-like transform.
ans = []
# note: each value of self.descriptors is (descriptor, dim,
# normalized-string, output-string).
# by 'output-string' we mean a string that can appear in
# config-files, i.e. it contains the 'final' names of nodes.
descriptor_final_str = self.descriptors['input']['final-string']
for config_name in ['init', 'ref', 'final' ]:
ans.append( (config_name,
'output-node name={0} input={1}'.format(
self.name, descriptor_final_str)))
return ans
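# Sketch of what XconfigTrivialOutputLayer above emits (hedged; the descriptor
# is illustrative): for 'output name=output input=Append(-1,0,1)' the same line
#   output-node name=output input=Append(...)
# is written to init.config, ref.config and final.config, with the descriptor
# expanded to the 'final' names of its nodes.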
class XconfigOutputLayer(XconfigLayerBase):
"""This class is for lines like
'output-layer name=output dim=4257 input=Append(input@-1, input@0, input@1, ReplaceIndex(ivector, t, 0))'
By default this includes a log-softmax component. The parameters are
    initialized to zero, as this is best for output layers.
Parameters of the class, and their defaults:
input='[-1]' : Descriptor giving the input of the layer.
dim=None : Output dimension of layer, will normally equal the number of pdfs.
include-log-softmax=true : setting it to false will omit the
log-softmax component- useful for chain models.
objective-type=linear : the only other choice currently is
'quadratic', for use in regression problems
learning-rate-factor=1.0 : Learning rate factor for the final
affine component, multiplies the standard learning rate. normally
you'll leave this as-is, but for xent regularization output layers
for chain models you'll want to set
learning-rate-factor=(0.5/xent_regularize),
normally learning-rate-factor=5.0 since xent_regularize is
normally 0.1.
presoftmax-scale-file=None : If set, a filename for a vector that
will be used to scale the output of the affine component before the
log-softmax (if include-log-softmax=true), or before the output
(if not). This is helpful to avoid instability in training due to
some classes having much more data than others. The way we normally
create this vector is to take the priors of the classes to the
power -0.25 and rescale them so the average is 1.0. This factor
-0.25 is referred to as presoftmax_prior_scale_power in scripts. In
the scripts this would normally be set to
config_dir/presoftmax_prior_scale.vec
output-delay=0 : Can be used to shift the frames on the output, equivalent
to delaying labels by this many frames (positive value increases latency
      in online decoding but may help if you're using unidirectional LSTMs).
    ng-affine-options='' : Can be used to supply non-default options to the affine
      layer (intended for the natural gradient, but can be an arbitrary string
      to be added to the config line, e.g. 'update-period=2').
"""
def __init__(self, first_token, key_to_value, prev_names = None):
assert first_token == 'output-layer'
XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)
def set_default_configs(self):
# note: self.config['input'] is a descriptor, '[-1]' means output
# the most recent layer.
self.config = {'input' : '[-1]',
'dim' : -1,
'include-log-softmax' : True,
# this would be false for chain models
'objective-type' : 'linear',
# see Nnet::ProcessOutputNodeConfigLine in
# nnet-nnet.cc for other options
'learning-rate-factor' : 1.0,
'presoftmax-scale-file' : '',
# used in DNN (not RNN) training when using
# frame-level objfns,
'max-change' : 1.5,
'param-stddev' : 0.0,
'bias-stddev' : 0.0,
'output-delay' : 0,
'ng-affine-options' : ''
}
def check_configs(self):
if self.config['dim'] <= -1:
raise RuntimeError("In output-layer, dim has invalid value {0}"
"".format(self.config['dim']))
if self.config['objective-type'] != 'linear' and \
                self.config['objective-type'] != 'quadratic':
raise RuntimeError("In output-layer, objective-type has"
" invalid value {0}"
"".format(self.config['objective-type']))
if self.config['learning-rate-factor'] <= 0.0:
raise RuntimeError("In output-layer, learning-rate-factor has"
" invalid value {0}"
"".format(self.config['learning-rate-factor']))
# you cannot access the output of this layer from other layers... see
# comment in output_name for the reason why.
def auxiliary_outputs(self):
return []
def output_name(self, auxiliary_outputs = None):
# Note: nodes of type output-node in nnet3 may not be accessed in
# Descriptors, so calling this with auxiliary_outputs=None doesn't
# make sense. But it might make sense to make the output of the softmax
# layer and/or the output of the affine layer available as inputs to
# other layers, in some circumstances.
# we'll implement that when it's needed.
raise RuntimeError("Outputs of output-layer may not be used by other"
" layers")
def output_dim(self, auxiliary_output = None):
# see comment in output_name().
raise RuntimeError("Outputs of output-layer may not be used by other"
" layers")
def get_full_config(self):
ans = []
# note: each value of self.descriptors is (descriptor, dim,
# normalized-string, output-string).
# by 'descriptor_final_string' we mean a string that can appear in
# config-files, i.e. it contains the 'final' names of nodes.
descriptor_final_string = self.descriptors['input']['final-string']
input_dim = self.descriptors['input']['dim']
output_dim = self.config['dim']
objective_type = self.config['objective-type']
learning_rate_factor = self.config['learning-rate-factor']
include_log_softmax = self.config['include-log-softmax']
presoftmax_scale_file = self.config['presoftmax-scale-file']
param_stddev = self.config['param-stddev']
bias_stddev = self.config['bias-stddev']
output_delay = self.config['output-delay']
max_change = self.config['max-change']
ng_affine_options = self.config['ng-affine-options']
# note: ref.config is used only for getting the left-context and
# right-context of the network;
# final.config is where we put the actual network definition.
for config_name in [ 'ref', 'final' ]:
# First the affine node.
line = ('component name={0}.affine'
' type=NaturalGradientAffineComponent'
' input-dim={1}'
' output-dim={2}'
' param-stddev={3}'
' bias-stddev={4}'
' max-change={5} {6} '
''.format(self.name, input_dim, output_dim,
param_stddev, bias_stddev, max_change, ng_affine_options) +
('learning-rate-factor={0} '.format(learning_rate_factor)
if learning_rate_factor != 1.0 else ''))
ans.append((config_name, line))
line = ('component-node name={0}.affine'
' component={0}.affine input={1}'
''.format(self.name, descriptor_final_string))
ans.append((config_name, line))
cur_node = '{0}.affine'.format(self.name)
            if presoftmax_scale_file != '' and config_name == 'final':
# don't use the presoftmax-scale in 'ref.config' since that
# file won't exist at the time we evaluate it.
# (ref.config is used to find the left/right context).
line = ('component name={0}.fixed-scale'
' type=FixedScaleComponent scales={1}'
''.format(self.name, presoftmax_scale_file))
ans.append((config_name, line))
line = ('component-node name={0}.fixed-scale'
' component={0}.fixed-scale input={1}'
''.format(self.name, cur_node))
ans.append((config_name, line))
cur_node = '{0}.fixed-scale'.format(self.name)
if include_log_softmax:
line = ('component name={0}.log-softmax'
' type=LogSoftmaxComponent dim={1}'
''.format(self.name, output_dim))
ans.append((config_name, line))
line = ('component-node name={0}.log-softmax'
' component={0}.log-softmax input={1}'
''.format(self.name, cur_node))
ans.append((config_name, line))
cur_node = '{0}.log-softmax'.format(self.name)
if output_delay != 0:
cur_node = 'Offset({0}, {1})'.format(cur_node, output_delay)
line = ('output-node name={0} input={1}'.format(self.name, cur_node))
ans.append((config_name, line))
return ans
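# Illustrative xconfig lines handled by XconfigOutputLayer above (dims are
# placeholders; see the class docstring for the meaning of the options):
#
#   output-layer name=output dim=3456 input=tdnn6 include-log-softmax=false
#   output-layer name=output-xent dim=3456 input=tdnn6 learning-rate-factor=5.0
#
# The second form is the cross-entropy regularization output described in the
# docstring, where learning-rate-factor = 0.5 / xent_regularize = 5.0 for the
# usual xent_regularize = 0.1.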
# This class is for parsing lines like
# 'relu-renorm-layer name=layer1 dim=1024 input=Append(-3,0,3)'
# or:
# 'sigmoid-layer name=layer1 dim=1024 input=Append(-3,0,3)'
# which specify addition of an affine component and a sequence of non-linearities.
# Here, the name of the layer itself dictates the sequence of nonlinearities
# that are applied after the affine component; the name should contain some
# combination of 'relu', 'renorm', 'sigmoid' and 'tanh',
# and these nonlinearities will be added along with the affine component.
#
# The dimension specified is the output dim; the input dim is worked out from the input descriptor.
# This class supports only nonlinearity types that do not change the dimension; we can create
# another layer type to enable the use of p-norm and similar dimension-reducing nonlinearities.
#
# See other configuration values below.
#
# Parameters of the class, and their defaults:
# input='[-1]' [Descriptor giving the input of the layer.]
# dim=None [Output dimension of layer, e.g. 1024]
# self-repair-scale=1.0e-05 [Affects relu, sigmoid and tanh layers.]
#
class XconfigBasicLayer(XconfigLayerBase):
def __init__(self, first_token, key_to_value, prev_names = None):
# Here we just list some likely combinations.. you can just add any
# combinations you want to use, to this list.
assert first_token in [ 'relu-layer', 'relu-renorm-layer', 'sigmoid-layer',
'tanh-layer' ]
XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)
def set_default_configs(self):
# note: self.config['input'] is a descriptor, '[-1]' means output
# the most recent layer.
self.config = { 'input':'[-1]',
'dim':-1,
'max-change' : 0.75,
'self-repair-scale' : 1.0e-05,
'target-rms' : 1.0,
'ng-affine-options' : ''}
def check_configs(self):
if self.config['dim'] < 0:
raise RuntimeError("dim has invalid value {0}".format(self.config['dim']))
if self.config['self-repair-scale'] < 0.0 or self.config['self-repair-scale'] > 1.0:
raise RuntimeError("self-repair-scale has invalid value {0}"
.format(self.config['self-repair-scale']))
if self.config['target-rms'] < 0.0:
raise RuntimeError("target-rms has invalid value {0}"
.format(self.config['target-rms']))
def output_name(self, auxiliary_output=None):
# at a later stage we might want to expose even the pre-nonlinearity
# vectors
        assert auxiliary_output is None
split_layer_name = self.layer_type.split('-')
assert split_layer_name[-1] == 'layer'
last_nonlinearity = split_layer_name[-2]
# return something like: layer3.renorm
return '{0}.{1}'.format(self.name, last_nonlinearity)
def output_dim(self, auxiliary_output = None):
output_dim = self.config['dim']
# If not set, the output-dim defaults to the input-dim.
if output_dim <= 0:
output_dim = self.descriptors['input']['dim']
return output_dim
def get_full_config(self):
ans = []
config_lines = self._generate_config()
for line in config_lines:
for config_name in ['ref', 'final']:
# we do not support user specified matrices in this layer
# so 'ref' and 'final' configs are the same.
ans.append((config_name, line))
return ans
def _generate_config(self):
split_layer_name = self.layer_type.split('-')
assert split_layer_name[-1] == 'layer'
nonlinearities = split_layer_name[:-1]
# by 'descriptor_final_string' we mean a string that can appear in
# config-files, i.e. it contains the 'final' names of nodes.
input_desc = self.descriptors['input']['final-string']
input_dim = self.descriptors['input']['dim']
# the child classes e.g. tdnn might want to process the input
# before adding the other components
return self._add_components(input_desc, input_dim, nonlinearities)
def _add_components(self, input_desc, input_dim, nonlinearities):
output_dim = self.output_dim()
self_repair_scale = self.config['self-repair-scale']
target_rms = self.config['target-rms']
max_change = self.config['max-change']
ng_affine_options = self.config['ng-affine-options']
configs = []
# First the affine node.
line = ('component name={0}.affine'
' type=NaturalGradientAffineComponent'
' input-dim={1}'
' output-dim={2}'
' max-change={3}'
' {4}'
''.format(self.name, input_dim, output_dim,
max_change, ng_affine_options))
configs.append(line)
line = ('component-node name={0}.affine'
' component={0}.affine input={1}'
''.format(self.name, input_desc))
configs.append(line)
cur_node = '{0}.affine'.format(self.name)
for nonlinearity in nonlinearities:
if nonlinearity == 'relu':
line = ('component name={0}.{1}'
' type=RectifiedLinearComponent dim={2}'
' self-repair-scale={3}'
''.format(self.name, nonlinearity, output_dim,
self_repair_scale))
elif nonlinearity == 'sigmoid':
line = ('component name={0}.{1}'
' type=SigmoidComponent dim={2}'
' self-repair-scale={3}'
''.format(self.name, nonlinearity, output_dim,
self_repair_scale))
elif nonlinearity == 'tanh':
line = ('component name={0}.{1}'
' type=TanhComponent dim={2}'
' self-repair-scale={3}'
''.format(self.name, nonlinearity, output_dim,
self_repair_scale))
elif nonlinearity == 'renorm':
line = ('component name={0}.{1}'
' type=NormalizeComponent dim={2}'
' target-rms={3}'
''.format(self.name, nonlinearity, output_dim,
target_rms))
else:
raise RuntimeError("Unknown nonlinearity type: {0}"
.format(nonlinearity))
configs.append(line)
line = ('component-node name={0}.{1}'
' component={0}.{1} input={2}'
''.format(self.name, nonlinearity, cur_node))
configs.append(line)
cur_node = '{0}.{1}'.format(self.name, nonlinearity)
return configs
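# Rough sketch of the config lines _add_components() above generates for
# 'relu-renorm-layer name=tdnn1 dim=512' (hedged: the input descriptor and
# input-dim are elided, and ng-affine-options is assumed empty):
#
#   component name=tdnn1.affine type=NaturalGradientAffineComponent input-dim=... output-dim=512 max-change=0.75
#   component-node name=tdnn1.affine component=tdnn1.affine input=...
#   component name=tdnn1.relu type=RectifiedLinearComponent dim=512 self-repair-scale=1e-05
#   component-node name=tdnn1.relu component=tdnn1.relu input=tdnn1.affine
#   component name=tdnn1.renorm type=NormalizeComponent dim=512 target-rms=1.0
#   component-node name=tdnn1.renorm component=tdnn1.renorm input=tdnn1.relu
#
# output_name() is then 'tdnn1.renorm', i.e. the last nonlinearity in the layer name.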
# This class is for lines like
# 'fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=foo/bar/lda.mat'
#
# The output dimension of the layer may be specified via 'dim=xxx', but if not specified,
# the dimension defaults to the same as the input. Note: we don't attempt to read that
# file at the time the config is created, because in the recipes, that file is created
# after the config files.
#
# See other configuration values below.
#
# Parameters of the class, and their defaults:
# input='[-1]' [Descriptor giving the input of the layer.]
# dim=None [Output dimension of layer; defaults to the same as the input dim.]
# affine-transform-file='' [Must be specified.]
#
class XconfigFixedAffineLayer(XconfigLayerBase):
def __init__(self, first_token, key_to_value, prev_names = None):
assert first_token == 'fixed-affine-layer'
XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)
def set_default_configs(self):
# note: self.config['input'] is a descriptor, '[-1]' means output
# the most recent layer.
self.config = { 'input':'[-1]',
'dim':-1,
'affine-transform-file':''}
def check_configs(self):
        if self.config['affine-transform-file'] == '':
raise RuntimeError("affine-transform-file must be set.")
def output_name(self, auxiliary_output = None):
# Fixed affine layer computes only one vector, there are no intermediate
# vectors.
        assert auxiliary_output is None
return self.name
def output_dim(self, auxiliary_output = None):
output_dim = self.config['dim']
# If not set, the output-dim defaults to the input-dim.
if output_dim <= 0:
output_dim = self.descriptors['input']['dim']
return output_dim
def get_full_config(self):
ans = []
# note: each value of self.descriptors is (descriptor, dim,
# normalized-string, output-string).
# by 'descriptor_final_string' we mean a string that can appear in
# config-files, i.e. it contains the 'final' names of nodes.
descriptor_final_string = self.descriptors['input']['final-string']
input_dim = self.descriptors['input']['dim']
output_dim = self.output_dim()
transform_file = self.config['affine-transform-file']
# to init.config we write an output-node with the name 'output' and
# with a Descriptor equal to the descriptor that's the input to this
# layer. This will be used to accumulate stats to learn the LDA transform.
line = 'output-node name=output input={0}'.format(descriptor_final_string)
ans.append(('init', line))
# write the 'real' component to final.config
line = 'component name={0} type=FixedAffineComponent matrix={1}'.format(
self.name, transform_file)
ans.append(('final', line))
# write a random version of the component, with the same dims, to ref.config
line = 'component name={0} type=FixedAffineComponent input-dim={1} output-dim={2}'.format(
self.name, input_dim, output_dim)
ans.append(('ref', line))
# the component-node gets written to final.config and ref.config.
line = 'component-node name={0} component={0} input={1}'.format(
self.name, descriptor_final_string)
ans.append(('final', line))
ans.append(('ref', line))
return ans
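# Sketch of what get_full_config() of XconfigFixedAffineLayer above writes for
# 'fixed-affine-layer name=lda input=Append(-1,0,1) affine-transform-file=exp/foo/lda.mat'
# (hedged; the descriptor and dims are illustrative):
#
#   init.config  : output-node name=output input=Append(...)
#   ref.config   : component name=lda type=FixedAffineComponent input-dim=... output-dim=...
#                  component-node name=lda component=lda input=Append(...)
#   final.config : component name=lda type=FixedAffineComponent matrix=exp/foo/lda.mat
#                  component-node name=lda component=lda input=Append(...)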
# This class is for lines like
# 'affine-layer name=affine input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0))'
#
# The output dimension of the layer may be specified via 'dim=xxx', but if not specified,
# the dimension defaults to the same as the input. Note: we don't attempt to read that
# file at the time the config is created, because in the recipes, that file is created
# after the config files.
#
# See other configuration values below.
#
# Parameters of the class, and their defaults:
# input='[-1]' [Descriptor giving the input of the layer.]
# dim=None [Output dimension of layer; defaults to the same as the input dim.]
#
class XconfigAffineLayer(XconfigLayerBase):
def __init__(self, first_token, key_to_value, prev_names = None):
assert first_token == 'affine-layer'
XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)
def set_default_configs(self):
# note: self.config['input'] is a descriptor, '[-1]' means output
# the most recent layer.
# use None for optional parameters as we want to default to the C++ defaults
# C++ component provides more options but I will just expose these for now
# Note : The type of the parameter is determined based on the value assigned
# so please use decimal point if your parameter is a float
self.config = { 'input' : '[-1]',
'dim' : -1,
'param-stddev' : -1.0, # this has to be initialized to 1/sqrt(input_dim)
'bias-stddev' : 1.0,
'bias-mean' : 0.0,
'max-change' : 0.75,
'learning-rate-factor' : 1.0,
'ng-affine-options' : ''}
def set_derived_configs(self):
super(XconfigAffineLayer, self).set_derived_configs()
if self.config['param-stddev'] < 0:
self.config['param-stddev'] = 1.0 / math.sqrt(self.descriptors['input']['dim'])
def check_configs(self):
if self.config['dim'] <= 0:
raise RuntimeError("dim specified is invalid")
def output_name(self, auxiliary_output = None):
# affine layer computes only one vector, there are no intermediate
# vectors.
        assert auxiliary_output is None
return self.name
def output_dim(self, auxiliary_output = None):
output_dim = self.config['dim']
# If not set, the output-dim defaults to the input-dim.
if output_dim <= 0:
output_dim = self.descriptors['input']['dim']
return output_dim
def get_full_config(self):
ans = []
# note: each value of self.descriptors is (descriptor, dim,
# normalized-string, output-string).
# by 'descriptor_final_string' we mean a string that can appear in
# config-files, i.e. it contains the 'final' names of nodes.
descriptor_final_string = self.descriptors['input']['final-string']
input_dim = self.descriptors['input']['dim']
output_dim = self.output_dim()
option_string=''
for key in ['param-stddev', 'bias-stddev', 'bias-mean', 'max-change']:
option_string += ' {0}={1}'.format(key, self.config[key])
option_string += self.config['ng-affine-options']
conf_lines = []
# write the 'real' component to final.config
conf_lines.append('component name={n} type=NaturalGradientAffineComponent '
'input-dim={i} output-dim={o} {opts}'.format(n = self.name,
i = input_dim,
o = output_dim,
opts = option_string))
# the component-node gets written to final.config and ref.config.
conf_lines.append('component-node name={0} component={0} input={1}'.format(self.name,
descriptor_final_string))
# the config is same for both final and ref configs
for conf_name in ['final', 'ref']:
for line in conf_lines:
ans.append((conf_name, line))
return ans
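# Worked example of the derived default in set_derived_configs() above: if the
# input descriptor has dimension 625 and param-stddev is not given on the
# xconfig line, it defaults to 1.0 / sqrt(625) = 0.04.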
def test_layers():
# for some config lines that should be printed the same way as they
# are read, check that this is the case.
for x in [ 'input name=input dim=30' ]:
assert str(config_line_to_object(x, [])) == x
|
the-stack_0_7520 | import logging
log = logging.getLogger(__name__)
from qcodes import VisaInstrument
import qcodes.utils.validators as vals
class DG645(VisaInstrument):
"""Qcodes driver for SRS DG645 digital delay generator.
"""
CHANNEL_MAPPING = {
'T0': 0, 'T1': 1, 'A': 2, 'B': 3, 'C': 4,
'D': 5, 'E': 6, 'F': 7, 'G': 8, 'H': 9
}
OUTPUT_MAPPING = {'T0': 0, 'AB': 1, 'CD': 2, 'EF': 3, 'GH': 4}
PRESCALE_MAPPING = {'trig': 0, 'AB': 1, 'CD': 2, 'EF': 3, 'GH': 4}
TRIGGER_MAPPING = {
'internal': 0,
'ext_rising': 1,
'ext_falling': 2,
'single_ext_rising': 3,
'single_ext_falling': 4,
'single': 5,
'line': 6,
}
POLARITY_MAPPING = {'-': 0, '+': 1}
DISPLAY_MAPPING = {
'trig_rate': 0,
'trig_thresh': 1,
'trig_single_shot': 2,
'trig_line': 3,
'advanced_trig_enable': 4,
'trig_holdoff': 5,
'prescale_config': 6,
'burst_mode': 7,
'burst_delay': 8,
'burst_count': 9,
'burst_period': 10,
'channel_delay': 11,
'channel_output_levels': 12,
'channel_output_polarity': 13,
'burst_T0_config': 14
}
def __init__(self, name, address, **kwargs):
super().__init__(name, address, terminator='\r\n', timeout=10, **kwargs)
self.add_parameter('trig_holdoff',
label='Trigger holdoff',
unit='s',
get_cmd='HOLD?',
get_parser=float,
set_cmd='HOLD {}'
)
# Prescale parameters
for channel, idx in self.PRESCALE_MAPPING.items():
if idx > 0:
self.add_parameter(
f'phase_{channel}',
                    label=f'{channel} prescale phase factor',
get_cmd=f'PHAS?{idx}',
get_parser=int,
set_cmd=f'PHAS {idx},{{}}',
vals=vals.Ints(min_value=0),
docstring="""\
The prescale phase factor determines the phase at which the associated output is
enabled. The output is enabled when the prescaler counter equals the phase
factor.
"""
)
self.add_parameter(
f'prescale_{channel}',
label=f'{channel} prescale factor',
get_cmd=f'PRES?{idx}',
get_parser=int,
set_cmd=f'PRES {idx},{{}}',
vals=vals.Ints(min_value=0),
docstring="""\
A prescaler on the trigger input enables one to generate
delay cycles at a sub-multiple of the trigger input frequency.
"""
)
# Trigger parameters
self.add_parameter(
'trigger_level',
label='Trigger level',
unit='V',
get_cmd='TLVL?',
get_parser=float,
set_cmd='TLVL {}',
vals=vals.Numbers()
)
self.add_parameter(
'trigger_rate',
label='Trigger rate',
unit='Hz',
get_cmd='TRAT?',
get_parser=float,
set_cmd='TRAT {}',
vals=vals.Numbers(min_value=0)
)
self.add_parameter(
'trigger_source',
label='Trigger source',
get_cmd=self._get_trigger_source,
get_parser=str,
set_cmd=self._set_trigger_source,
vals=vals.Enum(*tuple(self.TRIGGER_MAPPING))
)
# Burst parameters
self.add_parameter(
'burst_count',
label='Burst count',
get_cmd='BURC?',
get_parser=int,
set_cmd='BURC {}',
vals=vals.Ints(min_value=0)
)
self.add_parameter(
'burst_delay',
label='Burst delay',
unit='s',
get_cmd='BURD?',
get_parser=float,
set_cmd='BURD {}',
vals=vals.Numbers(min_value=0)
)
self.add_parameter(
'burst_period',
label='Burst period',
unit='s',
get_cmd='BURP?',
get_parser=float,
            set_cmd='BURP {}',
vals=vals.Numbers(min_value=100e-9, max_value=2000-10e-9)
)
self.add_parameter(
'burst_T0_config',
label='Burst T0 configuration',
get_cmd='BURT?',
get_parser=int,
set_cmd='BURT {}',
vals=vals.Enum(0,1)
)
# Channel parameters
for ch, idx in self.CHANNEL_MAPPING.items():
if idx > 1:
self.add_parameter(
f'delay_{ch}',
label=f'{ch} delay',
unit='s',
get_cmd=f'DLAY?{idx}',
get_parser=str,
set_cmd=lambda src_delay, channel=ch: self._set_delay(src_delay, channel),
vals=vals.Strings(),
docstring="""\
Set/query the delay of this channel relative to another.
Arguments/returned values are strings of the form
'{index_of_other_channel},{delay_in_seconds}'. For example, '2,+0.001'
indicates that this channel is delayed from channel A by 1 ms, since
self.CHANNEL_MAPPING['A'] == 2.
"""
)
self.add_parameter(
f'channel_link_{ch}',
label=f'Channel linked to {ch}',
get_cmd=f'LINK?{idx}',
get_parser=int,
set_cmd=lambda target, source=ch: self._set_link(target, source),
vals=vals.Enum(*tuple(k for k in self.CHANNEL_MAPPING if k != 'T1'))
)
# Output parameters
for out, idx in self.OUTPUT_MAPPING.items():
self.add_parameter(
f'amp_out_{out}',
label=f'Output {out} amplitude',
unit='V',
get_cmd=f'LAMP?{idx}',
get_parser=float,
set_cmd=f'LAMP {idx},{{}}',
vals=vals.Numbers()
)
self.add_parameter(
f'offset_out_{out}',
label=f'Output {out} offset',
unit='V',
get_cmd=f'LOFF?{idx}',
get_parser=float,
set_cmd=f'LOFF {idx},{{}}',
vals=vals.Numbers()
)
self.add_parameter(
f'polarity_out_{out}',
label=f'Output {out} polarity',
get_cmd=f'LPOL?{idx}',
get_parser=int,
set_cmd=f'LPOL {idx},{{}}',
vals=vals.Enum(0,1),
docstring='0 -> negative polarity, 1 -> positive polarity.'
)
self.snapshot(update=True)
self.connect_message()
def self_calibrate(self) -> None:
"""Run auto-calibration routine.
"""
self.write('*CAL?')
self.wait()
def self_test(self) -> None:
"""Run self-test routine.
"""
self.write('*TST?')
self.wait()
def reset(self) -> None:
"""Reset instrument.
"""
log.info(f'Resetting {self.name}.')
self.write('*RST')
def save_settings(self, location: int) -> None:
"""Save instrument settings to given location.
Args:
location: Location to which to save the settings (in [1..9]).
"""
log.info(f'Saving instrument settings to location {location}.')
self.write(f'*SAV {location}')
def trigger(self) -> None:
"""Initiates a single trigger if instrument is in single shot mode.
"""
self.write('*TRG')
def wait(self) -> None:
"""Wait for all prior commands to execute before continuing.
"""
self.write('*WAI')
def local(self) -> None:
"""Go to local.
"""
self.write('LCAL')
def remote(self) -> None:
"""Go to remote.
"""
self.write('REMT')
def _set_trigger_source(self, src: str) -> None:
self.write(f'TSRC {self.TRIGGER_MAPPING[src]}')
def _get_trigger_source(self) -> str:
response = self.ask('TSRC?')
keys = self.TRIGGER_MAPPING.keys()
values = self.TRIGGER_MAPPING.values()
return list(keys)[list(values).index(int(response))]
def _set_delay(self, src_delay: str, target: str) -> None:
source, delay = [s.strip() for s in src_delay.split(',')]
self.write('DLAY {},{},{}'.format(self.CHANNEL_MAPPING[target],
self.CHANNEL_MAPPING[source],
delay))
def _set_link(self, target: str, source: str) -> None:
self.write('LINK {},{}'.format(self.CHANNEL_MAPPING[target],
self.CHANNEL_MAPPING[source]))
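# Minimal usage sketch (hedged: the VISA address is a placeholder; parameter
# names follow the CHANNEL_MAPPING/OUTPUT_MAPPING definitions above):
#
#   dg = DG645('dg645', 'TCPIP0::192.168.1.100::inst0::INSTR')
#   dg.trigger_source('internal')   # one of TRIGGER_MAPPING's keys
#   dg.trigger_rate(1000)           # 1 kHz internal trigger
#   dg.delay_B('2,+0.001')          # B delayed 1 ms from channel A (index 2)
#   dg.amp_out_AB(2.5)              # AB output amplitude in volts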
|
the-stack_0_7522 | #!/usr/bin/env python3
"""Combine logs from multiple konjocoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "konjocoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'testdir', nargs='?', default='',
help=('temporary test directory to combine logs from. '
'Defaults to the most recent'))
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args = parser.parse_args()
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
testdir = args.testdir or find_latest_test_dir()
if not testdir:
print("No test directories found")
sys.exit(1)
if not args.testdir:
print("Opening latest test directory: {}".format(testdir), file=sys.stderr)
log_events = read_logs(testdir)
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def find_latest_test_dir():
"""Returns the latest tmpfile test directory prefix."""
tmpdir = tempfile.gettempdir()
def join_tmp(basename):
return os.path.join(tmpdir, basename)
def is_valid_test_tmpdir(basename):
fullpath = join_tmp(basename)
return (
os.path.isdir(fullpath)
and basename.startswith(TMPDIR_PREFIX)
and os.access(fullpath, os.R_OK)
)
testdir_paths = [
join_tmp(name) for name in os.listdir(tmpdir) if is_valid_test_tmpdir(name)
]
return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
timestamp = time_match.group()
if time_match.group(1) is None:
# timestamp does not have microseconds. Add zeroes.
timestamp_micro = timestamp.replace("Z", ".000000Z")
line = line.replace(timestamp, timestamp_micro)
timestamp = timestamp_micro
event = line
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
# Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
event += " " + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
lines = event.event.splitlines()
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, lines[0], colors["reset"]))
if len(lines) > 1:
for line in lines[1:]:
print("{0}{1}{2}".format(colors[event.source.rstrip()], line, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
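# Example invocations (illustrative paths):
#   ./combine_logs.py                                      # most recent konjocoin_func_test_* dir
#   ./combine_logs.py -c /tmp/konjocoin_func_test_abc123 | less -r
#   ./combine_logs.py --html > combined.html               # needs jinja2 and combined_log_template.html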
|
the-stack_0_7523 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT library to process data for classification task."""
import collections
import csv
import importlib
import json
import os
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
from official.nlp.bert import tokenization
class InputExample(object):
"""A single training/test example for simple seq regression/classification."""
def __init__(self,
guid,
text_a,
text_b=None,
label=None,
weight=None,
example_id=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string for classification, float for regression. The
label of the example. This should be specified for train and dev
examples, but not for test examples.
weight: (Optional) float. The weight of the example to be used during
training.
example_id: (Optional) int. The int identification number of example in
the corpus.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.weight = weight
self.example_id = example_id
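# Example construction (hedged; the text and label are illustrative):
#
#   example = InputExample(
#       guid='train-0',
#       text_a='a remarkably assured debut',
#       text_b=None,
#       label='1')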
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True,
weight=None,
example_id=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
self.weight = weight
self.example_id = example_id
class DataProcessor(object):
"""Base class for converters for seq regression/classification datasets."""
def __init__(self, process_text_fn=tokenization.convert_to_unicode):
self.process_text_fn = process_text_fn
self.is_regression = False
self.label_type = None
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@staticmethod
def get_processor_name():
"""Gets the string identifier of the processor."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.io.gfile.GFile(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
def _read_jsonl(cls, input_file):
"""Reads a json line file."""
with tf.io.gfile.GFile(input_file, "r") as f:
lines = []
for json_str in f:
lines.append(json.loads(json_str))
return lines
def featurize_example(self, *kargs, **kwargs):
"""Converts a single `InputExample` into a single `InputFeatures`."""
return convert_single_example(*kargs, **kwargs)
class DefaultGLUEDataProcessor(DataProcessor):
"""Processor for the SuperGLUE dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("validation")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("test")
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
raise NotImplementedError()
class AxProcessor(DataProcessor):
"""Processor for the AX dataset (GLUE diagnostics dataset)."""
def get_train_examples(self, data_dir):
"""See base class."""
train_mnli_dataset = tfds.load(
"glue/mnli", split="train", try_gcs=True).as_numpy_iterator()
return self._create_examples_tfds(train_mnli_dataset, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
val_mnli_dataset = tfds.load(
"glue/mnli", split="validation_matched",
try_gcs=True).as_numpy_iterator()
return self._create_examples_tfds(val_mnli_dataset, "validation")
def get_test_examples(self, data_dir):
"""See base class."""
test_ax_dataset = tfds.load(
"glue/ax", split="test", try_gcs=True).as_numpy_iterator()
return self._create_examples_tfds(test_ax_dataset, "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "AX"
def _create_examples_tfds(self, dataset, set_type):
"""Creates examples for the training/dev/test sets."""
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "contradiction"
text_a = self.process_text_fn(example["hypothesis"])
text_b = self.process_text_fn(example["premise"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class ColaProcessor(DefaultGLUEDataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "COLA"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/cola", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=None, label=label, weight=None))
return examples
class ImdbProcessor(DataProcessor):
"""Processor for the IMDb dataset."""
def get_labels(self):
return ["neg", "pos"]
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "train"))
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "test"))
@staticmethod
def get_processor_name():
"""See base class."""
return "IMDB"
def _create_examples(self, data_dir):
"""Creates examples."""
examples = []
for label in ["neg", "pos"]:
cur_dir = os.path.join(data_dir, label)
for filename in tf.io.gfile.listdir(cur_dir):
if not filename.endswith("txt"):
continue
if len(examples) % 1000 == 0:
logging.info("Loading dev example %d", len(examples))
path = os.path.join(cur_dir, filename)
with tf.io.gfile.GFile(path, "r") as f:
text = f.read().strip().replace("<br />", " ")
examples.append(
InputExample(
guid="unused_id", text_a=text, text_b=None, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def __init__(self,
mnli_type="matched",
process_text_fn=tokenization.convert_to_unicode):
super(MnliProcessor, self).__init__(process_text_fn)
self.dataset = tfds.load("glue/mnli", try_gcs=True)
if mnli_type not in ("matched", "mismatched"):
raise ValueError("Invalid `mnli_type`: %s" % mnli_type)
self.mnli_type = mnli_type
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("train")
def get_dev_examples(self, data_dir):
"""See base class."""
if self.mnli_type == "matched":
return self._create_examples_tfds("validation_matched")
else:
return self._create_examples_tfds("validation_mismatched")
def get_test_examples(self, data_dir):
"""See base class."""
if self.mnli_type == "matched":
return self._create_examples_tfds("test_matched")
else:
return self._create_examples_tfds("test_mismatched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "MNLI"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/mnli", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "contradiction"
text_a = self.process_text_fn(example["hypothesis"])
text_b = self.process_text_fn(example["premise"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class MrpcProcessor(DefaultGLUEDataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "MRPC"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/mrpc", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class PawsxProcessor(DataProcessor):
"""Processor for the PAWS-X data set."""
supported_languages = ["de", "en", "es", "fr", "ja", "ko", "zh"]
def __init__(self,
language="en",
process_text_fn=tokenization.convert_to_unicode):
super(PawsxProcessor, self).__init__(process_text_fn)
if language == "all":
self.languages = PawsxProcessor.supported_languages
elif language not in PawsxProcessor.supported_languages:
raise ValueError("language %s is not supported for PAWS-X task." %
language)
else:
self.languages = [language]
def get_train_examples(self, data_dir):
"""See base class."""
lines = []
for language in self.languages:
if language == "en":
train_tsv = "train.tsv"
else:
train_tsv = "translated_train.tsv"
# Skips the header.
lines.extend(
self._read_tsv(os.path.join(data_dir, language, train_tsv))[1:])
examples = []
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[1])
text_b = self.process_text_fn(line[2])
label = self.process_text_fn(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = []
for lang in PawsxProcessor.supported_languages:
lines.extend(
self._read_tsv(os.path.join(data_dir, lang, "dev_2k.tsv"))[1:])
examples = []
for i, line in enumerate(lines):
guid = "dev-%d" % i
text_a = self.process_text_fn(line[1])
text_b = self.process_text_fn(line[2])
label = self.process_text_fn(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples_by_lang = {k: [] for k in self.supported_languages}
for lang in self.supported_languages:
lines = self._read_tsv(os.path.join(data_dir, lang, "test_2k.tsv"))[1:]
for i, line in enumerate(lines):
guid = "test-%d" % i
text_a = self.process_text_fn(line[1])
text_b = self.process_text_fn(line[2])
label = self.process_text_fn(line[3])
examples_by_lang[lang].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XTREME-PAWS-X"
class QnliProcessor(DefaultGLUEDataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "QNLI"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/qnli", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "entailment"
text_a = self.process_text_fn(example["question"])
text_b = self.process_text_fn(example["sentence"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class QqpProcessor(DefaultGLUEDataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "QQP"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/qqp", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["question1"])
text_b = self.process_text_fn(example["question2"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class RteProcessor(DefaultGLUEDataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_labels(self):
"""See base class."""
# All datasets are converted to 2-class split, where for 3-class datasets we
# collapse neutral and contradiction into not_entailment.
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "RTE"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/rte", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "entailment"
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class SstProcessor(DefaultGLUEDataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "SST-2"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/sst2", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=None, label=label, weight=None))
return examples
class StsBProcessor(DefaultGLUEDataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def __init__(self, process_text_fn=tokenization.convert_to_unicode):
super(StsBProcessor, self).__init__(process_text_fn=process_text_fn)
self.is_regression = True
self.label_type = float
self._labels = None
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/stsb", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = 0.0
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = self.label_type(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
def get_labels(self):
"""See base class."""
return self._labels
@staticmethod
def get_processor_name():
"""See base class."""
return "STS-B"
class TfdsProcessor(DataProcessor):
"""Processor for generic text classification and regression TFDS data set.
The TFDS parameters are expected to be provided in the tfds_params string, in
a comma-separated list of parameter assignments.
Examples:
tfds_params="dataset=scicite,text_key=string"
tfds_params="dataset=imdb_reviews,test_split=,dev_split=test"
tfds_params="dataset=glue/cola,text_key=sentence"
tfds_params="dataset=glue/sst2,text_key=sentence"
tfds_params="dataset=glue/qnli,text_key=question,text_b_key=sentence"
tfds_params="dataset=glue/mrpc,text_key=sentence1,text_b_key=sentence2"
tfds_params="dataset=glue/stsb,text_key=sentence1,text_b_key=sentence2,"
"is_regression=true,label_type=float"
tfds_params="dataset=snli,text_key=premise,text_b_key=hypothesis,"
"skip_label=-1"
Possible parameters (please refer to the documentation of Tensorflow Datasets
(TFDS) for the meaning of individual parameters):
dataset: Required dataset name (potentially with subset and version number).
data_dir: Optional TFDS source root directory.
module_import: Optional Dataset module to import.
train_split: Name of the train split (defaults to `train`).
dev_split: Name of the dev split (defaults to `validation`).
test_split: Name of the test split (defaults to `test`).
text_key: Key of the text_a feature (defaults to `text`).
text_b_key: Key of the second text feature if available.
label_key: Key of the label feature (defaults to `label`).
test_text_key: Key of the text feature to use in test set.
test_text_b_key: Key of the second text feature to use in test set.
test_label: String to be used as the label for all test examples.
label_type: Type of the label key (defaults to `int`).
weight_key: Key of the float sample weight (is not used if not provided).
is_regression: Whether the task is a regression problem (defaults to False).
skip_label: Skip examples with given label (defaults to None).
"""
def __init__(self,
tfds_params,
process_text_fn=tokenization.convert_to_unicode):
super(TfdsProcessor, self).__init__(process_text_fn)
self._process_tfds_params_str(tfds_params)
if self.module_import:
importlib.import_module(self.module_import)
self.dataset, info = tfds.load(
self.dataset_name, data_dir=self.data_dir, with_info=True)
if self.is_regression:
self._labels = None
else:
self._labels = list(range(info.features[self.label_key].num_classes))
def _process_tfds_params_str(self, params_str):
"""Extracts TFDS parameters from a comma-separated assignements string."""
dtype_map = {"int": int, "float": float}
cast_str_to_bool = lambda s: s.lower() not in ["false", "0"]
tuples = [x.split("=") for x in params_str.split(",")]
d = {k.strip(): v.strip() for k, v in tuples}
self.dataset_name = d["dataset"] # Required.
self.data_dir = d.get("data_dir", None)
self.module_import = d.get("module_import", None)
self.train_split = d.get("train_split", "train")
self.dev_split = d.get("dev_split", "validation")
self.test_split = d.get("test_split", "test")
self.text_key = d.get("text_key", "text")
self.text_b_key = d.get("text_b_key", None)
self.label_key = d.get("label_key", "label")
self.test_text_key = d.get("test_text_key", self.text_key)
self.test_text_b_key = d.get("test_text_b_key", self.text_b_key)
self.test_label = d.get("test_label", "test_example")
self.label_type = dtype_map[d.get("label_type", "int")]
self.is_regression = cast_str_to_bool(d.get("is_regression", "False"))
self.weight_key = d.get("weight_key", None)
self.skip_label = d.get("skip_label", None)
if self.skip_label is not None:
self.skip_label = self.label_type(self.skip_label)
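  # Worked example of the parsing above: tfds_params="dataset=glue/sst2,text_key=sentence"
  # yields dataset_name='glue/sst2' and text_key='sentence', with everything else
  # taking the documented defaults (train_split='train', label_key='label',
  # label_type=int, is_regression=False, ...).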
def get_train_examples(self, data_dir):
assert data_dir is None
return self._create_examples(self.train_split, "train")
def get_dev_examples(self, data_dir):
assert data_dir is None
return self._create_examples(self.dev_split, "dev")
def get_test_examples(self, data_dir):
assert data_dir is None
return self._create_examples(self.test_split, "test")
def get_labels(self):
return self._labels
def get_processor_name(self):
return "TFDS_" + self.dataset_name
def _create_examples(self, split_name, set_type):
"""Creates examples for the training/dev/test sets."""
if split_name not in self.dataset:
raise ValueError("Split {} not available.".format(split_name))
dataset = self.dataset[split_name].as_numpy_iterator()
examples = []
text_b, weight = None, None
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = self.process_text_fn(example[self.test_text_key])
if self.test_text_b_key:
text_b = self.process_text_fn(example[self.test_text_b_key])
label = self.test_label
else:
text_a = self.process_text_fn(example[self.text_key])
if self.text_b_key:
text_b = self.process_text_fn(example[self.text_b_key])
label = self.label_type(example[self.label_key])
if self.skip_label is not None and label == self.skip_label:
continue
if self.weight_key:
weight = float(example[self.weight_key])
examples.append(
InputExample(
guid=guid,
text_a=text_a,
text_b=text_b,
label=label,
weight=weight))
return examples
class WnliProcessor(DefaultGLUEDataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "WNLI"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/wnli", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
supported_languages = [
"ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr",
"ur", "vi", "zh"
]
def __init__(self,
language="en",
process_text_fn=tokenization.convert_to_unicode):
super(XnliProcessor, self).__init__(process_text_fn)
if language == "all":
self.languages = XnliProcessor.supported_languages
elif language not in XnliProcessor.supported_languages:
raise ValueError("language %s is not supported for XNLI task." % language)
else:
self.languages = [language]
def get_train_examples(self, data_dir):
"""See base class."""
lines = []
for language in self.languages:
# Skips the header.
lines.extend(
self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % language))[1:])
examples = []
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % i
text_a = self.process_text_fn(line[6])
text_b = self.process_text_fn(line[7])
label = self.process_text_fn(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.test.tsv"))
examples_by_lang = {k: [] for k in XnliProcessor.supported_languages}
for i, line in enumerate(lines):
if i == 0:
continue
guid = "test-%d" % i
language = self.process_text_fn(line[0])
text_a = self.process_text_fn(line[6])
text_b = self.process_text_fn(line[7])
label = self.process_text_fn(line[1])
examples_by_lang[language].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XNLI"
class XtremePawsxProcessor(DataProcessor):
"""Processor for the XTREME PAWS-X data set."""
supported_languages = ["de", "en", "es", "fr", "ja", "ko", "zh"]
def __init__(self,
process_text_fn=tokenization.convert_to_unicode,
translated_data_dir=None,
only_use_en_dev=True):
"""See base class.
Args:
process_text_fn: See base class.
translated_data_dir: If specified, will also include translated data in
the training and testing data.
only_use_en_dev: If True, only use english dev data. Otherwise, use dev
data from all languages.
"""
super(XtremePawsxProcessor, self).__init__(process_text_fn)
self.translated_data_dir = translated_data_dir
self.only_use_en_dev = only_use_en_dev
def get_train_examples(self, data_dir):
"""See base class."""
examples = []
if self.translated_data_dir is None:
lines = self._read_tsv(os.path.join(data_dir, "train-en.tsv"))
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-train",
f"en-{lang}-translated.tsv"))
for i, line in enumerate(lines):
guid = f"train-{lang}-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = self.process_text_fn(line[4])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
if self.only_use_en_dev:
lines = self._read_tsv(os.path.join(data_dir, "dev-en.tsv"))
for i, line in enumerate(lines):
guid = "dev-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(os.path.join(data_dir, f"dev-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"dev-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples_by_lang = {}
for lang in self.supported_languages:
examples_by_lang[lang] = []
lines = self._read_tsv(os.path.join(data_dir, f"test-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = "0"
examples_by_lang[lang].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if self.translated_data_dir is not None:
for lang in self.supported_languages:
if lang == "en":
continue
examples_by_lang[f"{lang}-en"] = []
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-test",
f"test-{lang}-en-translated.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-en-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = "0"
examples_by_lang[f"{lang}-en"].append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XTREME-PAWS-X"
class XtremeXnliProcessor(DataProcessor):
"""Processor for the XTREME XNLI data set."""
supported_languages = [
"ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr",
"ur", "vi", "zh"
]
def __init__(self,
process_text_fn=tokenization.convert_to_unicode,
translated_data_dir=None,
only_use_en_dev=True):
"""See base class.
Args:
process_text_fn: See base class.
translated_data_dir: If specified, will also include translated data in
the training data.
only_use_en_dev: If True, only use english dev data. Otherwise, use dev
data from all languages.
"""
super(XtremeXnliProcessor, self).__init__(process_text_fn)
self.translated_data_dir = translated_data_dir
self.only_use_en_dev = only_use_en_dev
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "train-en.tsv"))
examples = []
if self.translated_data_dir is None:
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-train",
f"en-{lang}-translated.tsv"))
for i, line in enumerate(lines):
guid = f"train-{lang}-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = self.process_text_fn(line[4])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
if self.only_use_en_dev:
lines = self._read_tsv(os.path.join(data_dir, "dev-en.tsv"))
for i, line in enumerate(lines):
guid = "dev-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(os.path.join(data_dir, f"dev-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"dev-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples_by_lang = {}
for lang in self.supported_languages:
examples_by_lang[lang] = []
lines = self._read_tsv(os.path.join(data_dir, f"test-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = "contradiction"
examples_by_lang[lang].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if self.translated_data_dir is not None:
for lang in self.supported_languages:
if lang == "en":
continue
examples_by_lang[f"{lang}-en"] = []
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-test",
f"test-{lang}-en-translated.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-en-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = "contradiction"
examples_by_lang[f"{lang}-en"].append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XTREME-XNLI"
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
if label_list:
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
seg_id_a = 0
seg_id_b = 1
seg_id_cls = 0
seg_id_pad = 0
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(seg_id_cls)
for token in tokens_a:
tokens.append(token)
segment_ids.append(seg_id_a)
tokens.append("[SEP]")
segment_ids.append(seg_id_a)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(seg_id_b)
tokens.append("[SEP]")
segment_ids.append(seg_id_b)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(seg_id_pad)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label] if label_map else example.label
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s", (example.guid))
logging.info("tokens: %s",
" ".join([tokenization.printable_text(x) for x in tokens]))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logging.info("label: %s (id = %s)", example.label, str(label_id))
logging.info("weight: %s", example.weight)
logging.info("example_id: %s", example.example_id)
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True,
weight=example.weight,
example_id=example.example_id)
return feature
class AXgProcessor(DataProcessor):
"""Processor for the AXg dataset (SuperGLUE diagnostics dataset)."""
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "AX-g.jsonl")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "AXg"
def _create_examples(self, lines, set_type):
"""Creates examples for the training/dev/test sets."""
examples = []
for line in lines:
guid = "%s-%s" % (set_type, self.process_text_fn(str(line["idx"])))
text_a = self.process_text_fn(line["premise"])
text_b = self.process_text_fn(line["hypothesis"])
label = self.process_text_fn(line["label"])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class BoolQProcessor(DefaultGLUEDataProcessor):
"""Processor for the BoolQ dataset (SuperGLUE diagnostics dataset)."""
def get_labels(self):
"""See base class."""
return ["True", "False"]
@staticmethod
def get_processor_name():
"""See base class."""
return "BoolQ"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"super_glue/boolq", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["question"])
text_b = self.process_text_fn(example["passage"])
label = "False"
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class CBProcessor(DefaultGLUEDataProcessor):
"""Processor for the CB dataset (SuperGLUE diagnostics dataset)."""
def get_labels(self):
"""See base class."""
return ["entailment", "neutral", "contradiction"]
@staticmethod
def get_processor_name():
"""See base class."""
return "CB"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"super_glue/cb", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["premise"])
text_b = self.process_text_fn(example["hypothesis"])
label = "entailment"
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SuperGLUERTEProcessor(DefaultGLUEDataProcessor):
"""Processor for the RTE dataset (SuperGLUE version)."""
def get_labels(self):
"""See base class."""
# All datasets are converted to 2-class split, where for 3-class datasets we
# collapse neutral and contradiction into not_entailment.
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "RTESuperGLUE"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
examples = []
dataset = tfds.load(
"super_glue/rte", split=set_type, try_gcs=True).as_numpy_iterator()
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["premise"])
text_b = self.process_text_fn(example["hypothesis"])
label = "entailment"
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WiCInputExample(InputExample):
  """A single input example for the WiC dataset (SuperGLUE version)."""
def __init__(self,
guid,
text_a,
text_b=None,
label=None,
word=None,
weight=None,
example_id=None):
"""A single training/test example for simple seq regression/classification."""
super(WiCInputExample, self).__init__(guid, text_a, text_b, label, weight,
example_id)
self.word = word
class WiCProcessor(DefaultGLUEDataProcessor):
  """Processor for the WiC dataset (SuperGLUE version)."""
def get_labels(self):
"""Not used."""
return []
@staticmethod
def get_processor_name():
"""See base class."""
return "RTESuperGLUE"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
examples = []
dataset = tfds.load(
"super_glue/wic", split=set_type, try_gcs=True).as_numpy_iterator()
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
word = self.process_text_fn(example["word"])
label = 0
if set_type != "test":
label = example["label"]
examples.append(
WiCInputExample(
guid=guid, text_a=text_a, text_b=text_b, word=word, label=label))
return examples
def featurize_example(self, ex_index, example, label_list, max_seq_length,
tokenizer):
"""Here we concate sentence1, sentence2, word together with [SEP] tokens."""
del label_list
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = tokenizer.tokenize(example.text_b)
tokens_word = tokenizer.tokenize(example.word)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP], [SEP] with "- 4"
# Here we only pop out the first two sentence tokens.
_truncate_seq_pair(tokens_a, tokens_b,
max_seq_length - 4 - len(tokens_word))
seg_id_a = 0
seg_id_b = 1
seg_id_c = 2
seg_id_cls = 0
seg_id_pad = 0
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(seg_id_cls)
for token in tokens_a:
tokens.append(token)
segment_ids.append(seg_id_a)
tokens.append("[SEP]")
segment_ids.append(seg_id_a)
for token in tokens_b:
tokens.append(token)
segment_ids.append(seg_id_b)
tokens.append("[SEP]")
segment_ids.append(seg_id_b)
for token in tokens_word:
tokens.append(token)
segment_ids.append(seg_id_c)
tokens.append("[SEP]")
segment_ids.append(seg_id_c)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(seg_id_pad)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = example.label
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s", (example.guid))
logging.info("tokens: %s",
" ".join([tokenization.printable_text(x) for x in tokens]))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logging.info("label: %s (id = %s)", example.label, str(label_id))
logging.info("weight: %s", example.weight)
logging.info("example_id: %s", example.example_id)
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True,
weight=example.weight,
example_id=example.example_id)
return feature
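  # Illustrative layout produced by featurize_example for a WiC pair; the token
  # strings below are hypothetical:
  #   tokens:      [CLS] an ##gry man [SEP] an ##gry sea [SEP] an ##gry [SEP]
  #   segment_ids:   0    0   0    0    0    1   1    1    1    2   2     2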
def file_based_convert_examples_to_features(examples,
label_list,
max_seq_length,
tokenizer,
output_file,
label_type=None,
featurize_fn=None):
"""Convert a set of `InputExample`s to a TFRecord file."""
tf.io.gfile.makedirs(os.path.dirname(output_file))
writer = tf.io.TFRecordWriter(output_file)
for ex_index, example in enumerate(examples):
if ex_index % 10000 == 0:
logging.info("Writing example %d of %d", ex_index, len(examples))
if featurize_fn:
feature = featurize_fn(ex_index, example, label_list, max_seq_length,
tokenizer)
else:
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if label_type is not None and label_type == float:
features["label_ids"] = create_float_feature([feature.label_id])
elif feature.label_id is not None:
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
if feature.weight is not None:
features["weight"] = create_float_feature([feature.weight])
if feature.example_id is not None:
features["example_id"] = create_int_feature([feature.example_id])
else:
features["example_id"] = create_int_feature([ex_index])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
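# A minimal sketch for sanity-checking a record written above by parsing it back
# with the same feature layout; the record path and sequence length are placeholders.
def _inspect_tf_record_example(record_path="/tmp/train.tf_record", max_seq_length=128):
  name_to_features = {
      "input_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
      "input_mask": tf.io.FixedLenFeature([max_seq_length], tf.int64),
      "segment_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
      "label_ids": tf.io.FixedLenFeature([], tf.int64),
      "is_real_example": tf.io.FixedLenFeature([], tf.int64),
  }
  for raw_record in tf.data.TFRecordDataset(record_path).take(1):
    parsed = tf.io.parse_single_example(raw_record, name_to_features)
    logging.info("parsed shapes: %s", {k: v.shape for k, v in parsed.items()})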
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
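# A tiny worked example of the truncation heuristic above: the longer list is
# popped one token at a time until the combined length fits the budget. The
# tokens are made up for illustration.
def _truncate_seq_pair_example():
  tokens_a = ["the", "quick", "brown", "fox", "jumps"]
  tokens_b = ["over", "the", "dog"]
  _truncate_seq_pair(tokens_a, tokens_b, max_length=6)
  # Only tokens_a shrinks because it starts longer:
  # tokens_a == ["the", "quick", "brown"], tokens_b == ["over", "the", "dog"]
  return tokens_a, tokens_b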
def generate_tf_record_from_data_file(processor,
data_dir,
tokenizer,
train_data_output_path=None,
eval_data_output_path=None,
test_data_output_path=None,
max_seq_length=128):
"""Generates and saves training data into a tf record file.
Args:
processor: Input processor object to be used for generating data. Subclass
of `DataProcessor`.
data_dir: Directory that contains train/eval/test data to process.
tokenizer: The tokenizer to be applied on the data.
train_data_output_path: Output to which processed tf record for training
will be saved.
eval_data_output_path: Output to which processed tf record for evaluation
will be saved.
test_data_output_path: Output to which processed tf record for testing
will be saved. Must be a pattern template with {} if processor has
language specific test data.
max_seq_length: Maximum sequence length of the to be generated
training/eval data.
Returns:
A dictionary containing input meta data.
"""
assert train_data_output_path or eval_data_output_path
label_list = processor.get_labels()
label_type = getattr(processor, "label_type", None)
is_regression = getattr(processor, "is_regression", False)
has_sample_weights = getattr(processor, "weight_key", False)
num_training_data = 0
if train_data_output_path:
train_input_data_examples = processor.get_train_examples(data_dir)
file_based_convert_examples_to_features(train_input_data_examples,
label_list, max_seq_length,
tokenizer, train_data_output_path,
label_type,
processor.featurize_example)
num_training_data = len(train_input_data_examples)
if eval_data_output_path:
eval_input_data_examples = processor.get_dev_examples(data_dir)
file_based_convert_examples_to_features(eval_input_data_examples,
label_list, max_seq_length,
tokenizer, eval_data_output_path,
label_type,
processor.featurize_example)
meta_data = {
"processor_type": processor.get_processor_name(),
"train_data_size": num_training_data,
"max_seq_length": max_seq_length,
}
if test_data_output_path:
test_input_data_examples = processor.get_test_examples(data_dir)
if isinstance(test_input_data_examples, dict):
for language, examples in test_input_data_examples.items():
file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer,
test_data_output_path.format(language), label_type,
processor.featurize_example)
meta_data["test_{}_data_size".format(language)] = len(examples)
else:
file_based_convert_examples_to_features(test_input_data_examples,
label_list, max_seq_length,
tokenizer, test_data_output_path,
label_type,
processor.featurize_example)
meta_data["test_data_size"] = len(test_input_data_examples)
if is_regression:
meta_data["task_type"] = "bert_regression"
meta_data["label_type"] = {int: "int", float: "float"}[label_type]
else:
meta_data["task_type"] = "bert_classification"
meta_data["num_labels"] = len(processor.get_labels())
if has_sample_weights:
meta_data["has_sample_weights"] = True
if eval_data_output_path:
meta_data["eval_data_size"] = len(eval_input_data_examples)
return meta_data
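# A minimal end-to-end sketch, assuming the tokenization module provides
# FullTokenizer (as in the BERT reference implementation) and that an XNLI data
# directory is available locally; every path below is a placeholder.
def _generate_xnli_records_example():
  tokenizer = tokenization.FullTokenizer(
      vocab_file="/path/to/vocab.txt", do_lower_case=True)
  processor = XnliProcessor(language="en")
  return generate_tf_record_from_data_file(
      processor,
      data_dir="/path/to/xnli",
      tokenizer=tokenizer,
      train_data_output_path="/tmp/xnli_train.tf_record",
      eval_data_output_path="/tmp/xnli_eval.tf_record",
      max_seq_length=128)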
|
the-stack_0_7527 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from pyzeppelin.config import ClientConfig
from pyzeppelin.notebook import Note
from pyzeppelin.notebook import Paragraph
import time
import logging
class SessionInfo:
def __init__(self, resp_json):
"""
:param resp_json:
"""
self.session_id = None
self.note_id = None
self.interpreter = None
self.state = None
self.weburl = None
self.start_time = None
if "sessionId" in resp_json:
self.session_id = resp_json['sessionId']
if "noteId" in resp_json:
self.note_id = resp_json['noteId']
if "interpreter" in resp_json:
self.interpreter = resp_json['interpreter']
if "state" in resp_json:
self.state = resp_json['state']
if "weburl" in resp_json:
self.weburl = resp_json['weburl']
if "startTime" in resp_json:
self.start_time = resp_json['startTime']
class ZeppelinClient:
"""
    Low-level Zeppelin SDK, used to interact with Zeppelin at the note/paragraph abstraction layer.
"""
def __init__(self, client_config):
self.client_config = client_config
self.zeppelin_rest_url = client_config.get_zeppelin_rest_url()
self.session = requests.Session()
def _check_response(self, resp):
if resp.status_code != 200:
raise Exception("Invoke rest api failed, status code: {}, status text: {}".format(
resp.status_code, resp.text))
def get_version(self):
"""
Return Zeppelin version
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/version")
self._check_response(resp)
return resp.json()['body']['version']
def login(self, user_name, password, knox_sso = None):
"""
Login to Zeppelin, use knox_sso if it is provided.
:param user_name:
:param password:
:param knox_sso:
:return:
"""
if knox_sso:
self.session.auth = (user_name, password)
resp = self.session.get(knox_sso + "?originalUrl=" + self.zeppelin_rest_url, verify=False)
if resp.status_code != 200:
raise Exception("Knox SSO login fails, status: {}, status_text: {}" \
.format(resp.status_code, resp.text))
resp = self.session.get(self.zeppelin_rest_url + "/api/security/ticket")
if resp.status_code != 200:
raise Exception("Fail to get ticket after Knox SSO, status: {}, status_text: {}" \
.format(resp.status_code, resp.text))
else:
resp = self.session.post(self.zeppelin_rest_url + "/api/login",
data = {'userName': user_name, 'password': password})
self._check_response(resp)
def create_note(self, note_path, default_interpreter_group = 'spark'):
"""
        Create a new note with the given note_path and default_interpreter_group
:param note_path:
:param default_interpreter_group:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook",
json = {'name' : note_path, 'defaultInterpreterGroup': default_interpreter_group})
self._check_response(resp)
return resp.json()['body']
def delete_note(self, note_id):
"""
        Delete a note with the given note_id
:param note_id:
:return:
"""
resp = self.session.delete(self.zeppelin_rest_url + "/api/notebook/" + note_id)
self._check_response(resp)
def query_note_result(self, note_id):
"""
Query note result via Zeppelin rest api and convert the returned json to NoteResult
:param note_id:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/notebook/" + note_id)
self._check_response(resp)
note_json = resp.json()['body']
return Note(note_json)
def execute_note(self, note_id, params = {}):
"""
        Execute the given note with params, blocking until note execution is finished.
:param note_id:
:param params:
:return:
"""
self.submit_note(note_id, params)
return self.wait_until_note_finished(note_id)
def submit_note(self, note_id, params = {}):
"""
        Execute the given note with params, returning once submission is finished. This is a
        non-blocking api; it won't wait for the completion of the note execution.
:param note_id:
:param params:
:return:
"""
logging.info("Submitting note: " + note_id + " with params: " + str(params))
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/job/" + note_id,
params = {'blocking': 'false', 'isolated': 'true', 'reload': 'true'},
json = {'params': params})
self._check_response(resp)
return self.query_note_result(note_id)
def wait_until_note_finished(self, note_id):
"""
Wait until note execution is finished.
:param note_id:
:return:
"""
while True:
note_result = self.query_note_result(note_id)
logging.info("note_is_running: " + str(note_result.is_running) + ", jobURL: " +
str(list(map(lambda p: p.jobUrls, filter(lambda p: p.jobUrls, note_result.paragraphs)))))
if not note_result.is_running:
return note_result
time.sleep(self.client_config.get_query_interval())
def reload_note_list(self):
resp = self.session.get(self.zeppelin_rest_url + "/api/notebook", params = {'reload': 'true'})
self._check_response(resp)
return resp.json()['body']
def get_note(self, note_id, reload = False):
"""
Get specified note.
:param note_id:
:param reload:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/notebook/" + note_id, params = {'reload': reload})
self._check_response(resp)
return resp.json()['body']
def clone_note(self, note_id, dest_note_path):
"""
Clone specific note to another location.
:param note_id:
:param dest_note_path:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/" + note_id, json = {'name': dest_note_path})
self._check_response(resp)
return resp.json()['body']
def add_paragraph(self, note_id, title, text):
"""
Add paragraph to specific note at the last paragraph
:param note_id:
:param title:
:param text:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/" + note_id + "/paragraph", json = {'title': title, 'text': text})
self._check_response(resp)
return resp.json()['body']
def update_paragraph(self, note_id, paragraph_id, title, text):
"""
update specified paragraph with given title and text
:param note_id:
:param paragraph_id:
:param title:
:param text:
:return:
"""
resp = self.session.put(self.zeppelin_rest_url + "/api/notebook/" + note_id + "/paragraph/" + paragraph_id,
json = {'title' : title, 'text' : text})
self._check_response(resp)
def execute_paragraph(self, note_id, paragraph_id, params = {}, session_id = "", isolated = False):
"""
Blocking api, execute specified paragraph with given params
:param note_id:
:param paragraph_id:
:param params:
:param session_id:
:param isolated:
:return:
"""
self.submit_paragraph(note_id, paragraph_id, params, session_id, isolated)
return self.wait_until_paragraph_finished(note_id, paragraph_id)
def submit_paragraph(self, note_id, paragraph_id, params = {}, session_id = "", isolated = False):
"""
Non-blocking api, execute specified paragraph with given params.
:param note_id:
:param paragraph_id:
:param params:
:param session_id:
:param isolated:
:return:
"""
logging.info("Submitting paragraph: " + paragraph_id + " with params: " + str(params))
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/job/" + note_id + "/" + paragraph_id,
params = {'sessionId': session_id, 'isolated': isolated, 'reload': 'true'},
json = {'params': params})
self._check_response(resp)
return self.query_paragraph_result(note_id, paragraph_id)
def query_paragraph_result(self, note_id, paragraph_id):
"""
Query specified paragraph result.
:param note_id:
:param paragraph_id:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/notebook/" + note_id + "/paragraph/" + paragraph_id)
self._check_response(resp)
return Paragraph(resp.json()['body'])
def wait_until_paragraph_finished(self, note_id, paragraph_id):
"""
Wait until specified paragraph execution is finished
:param note_id:
:param paragraph_id:
:return:
"""
while True:
paragraph_result = self.query_paragraph_result(note_id, paragraph_id)
logging.info("paragraph_status: " + str(paragraph_result.status) + ", jobURL: " + str(paragraph_result.jobUrls))
if paragraph_result.is_completed():
return paragraph_result
time.sleep(self.client_config.get_query_interval())
def cancel_paragraph(self, note_id, paragraph_id):
"""
Cancel specified paragraph execution.
:param note_id:
:param paragraph_id:
:return:
"""
resp = self.session.delete(self.zeppelin_rest_url + "/api/notebook/job/" + note_id + "/" + paragraph_id)
self._check_response(resp)
def cancel_note(self, note_id):
"""
Cancel specified note execution.
:param note_id:
:return:
"""
        resp = self.session.delete(self.zeppelin_rest_url + "/api/notebook/job/" + note_id)
        self._check_response(resp)
def new_session(self, interpreter):
"""
Create new ZSession for specified interpreter
:param interpreter:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/session",
params = {'interpreter': interpreter})
self._check_response(resp)
return SessionInfo(resp.json()['body'])
def stop_session(self, session_id):
"""
Stop specified ZSession
:param session_id:
:return:
"""
resp = self.session.delete(self.zeppelin_rest_url + "/api/session/" + session_id)
self._check_response(resp)
def get_session(self, session_id):
"""
Get SessionInfo of specified session_id
:param session_id:
:return:
"""
resp = self.session.get(self.zeppelin_rest_url + "/api/session/" + session_id)
if resp.status_code == 404:
raise Exception("No such session: " + session_id)
self._check_response(resp)
return SessionInfo(resp.json()['body'])
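    # Minimal sketch of how the session helpers fit together; `client` and the
    # interpreter name are hypothetical:
    #
    #   info = client.new_session('python')
    #   paragraph_id = client.next_session_paragraph(info.note_id, max_statement=100)
    #   client.stop_session(info.session_id)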
def next_session_paragraph(self, note_id, max_statement):
"""
Create a new paragraph for specified session.
:param note_id:
:param max_statement:
:return:
"""
resp = self.session.post(self.zeppelin_rest_url + "/api/notebook/" + note_id +"/paragraph/next",
params= {'maxParagraph' : max_statement})
self._check_response(resp)
return resp.json()['message']
if __name__ == "__main__":
client_config = ClientConfig("")
client = ZeppelinClient(client_config)
client.login("", "", knox_sso="https://:8443/gateway/knoxsso/api/v1/websso")
print('version:' + client.get_version())
    note_id = None
try:
note_id = client.create_note('/test/note_18', 'spark')
note_result = client.query_note_result(note_id)
print(note_result)
client.submit_note(note_id)
note_result = client.wait_until_note_finished(note_id)
print("note is finished")
print("note_result: " + str(note_result))
paragraph_id = client.add_paragraph(note_id, 'title', '%sh pwd')
client.submit_paragraph(note_id, paragraph_id)
client.wait_until_paragraph_finished(note_id, paragraph_id)
note_result = client.query_note_result(note_id)
print("note is finished")
print("note_result: " + str(note_result))
print(note_result)
finally:
if note_id:
pass
# client.delete_note(note_id)
|
the-stack_0_7528 | """
https://stackoverflow.com/questions/42394585/how-to-inspect-a-tensorflow-tfrecord-file
for example in tf.python_io.tf_record_iterator("data/foobar.tfrecord"):
result = tf.train.Example.FromString(example)
"""
import csv
import functools
import json
import multiprocessing.dummy
import os
import random
import numpy as np
import skimage.draw
import skimage.io
import tensorflow as tf
import qdraw.dataset as dataset
def perturb_strokes(strokes, d):
"""
"""
output_strokes = []
    for xs, ys in strokes:
xs = [x + random.randint(-d, d) for x in xs]
ys = [y + random.randint(-d, d) for y in ys]
output_strokes.append((xs, ys))
# NOTE: rotate
return output_strokes
def normalize_strokes_to_uniform(strokes):
"""
"""
def extremum(ls, idx, fun):
"""
"""
for i, points in enumerate(ls):
m = fun(points[idx])
n = m if i == 0 else fun(n, m)
return float(n)
output_strokes = []
min_x, min_y = extremum(strokes, 0, min), extremum(strokes, 1, min)
max_x, max_y = extremum(strokes, 0, max), extremum(strokes, 1, max)
mid_x, mid_y = 0.5 * (max_x + min_x), 0.5 * (max_y + min_y)
# NOTE: scale
s = 2.0 / max(max_x - min_x, max_y - min_y)
for xs, ys in strokes:
xs = [(x - mid_x) * s for x in xs]
ys = [(y - mid_y) * s for y in ys]
output_strokes.append((xs, ys))
return output_strokes
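# A tiny worked example: normalize_strokes_to_uniform recentres strokes on the
# origin and scales the longer side to the range [-1, 1]. The stroke is made up.
def _normalize_uniform_example():
    strokes = [([0, 10, 20], [0, 5, 10])]
    # width 20 is the longer side, so the scale is 2 / 20 = 0.1 and the centre
    # (10, 5) maps to (0, 0): xs -> [-1.0, 0.0, 1.0], ys -> [-0.5, 0.0, 0.5]
    return normalize_strokes_to_uniform(strokes)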
def normalize_strokes_to_image(strokes, image_size):
"""
"""
def extremum(ls, idx, fun):
"""
"""
for i, points in enumerate(ls):
m = fun(points[idx])
n = m if i == 0 else fun(n, m)
return n
output_strokes = []
min_x, min_y = extremum(strokes, 0, min), extremum(strokes, 1, min)
max_x, max_y = extremum(strokes, 0, max), extremum(strokes, 1, max)
# NOTE: scale to fix image_size
s = max(max_x - min_x, max_y - min_y)
t = image_size - 1
for xs, ys in strokes:
xs = [(x - min_x) * t // s for x in xs]
ys = [(y - min_y) * t // s for y in ys]
output_strokes.append((xs, ys))
strokes, output_strokes = output_strokes, []
# NOTE: move to center
tx = (t - extremum(strokes, 0, max)) // 2
ty = (t - extremum(strokes, 1, max)) // 2
for xs, ys in strokes:
xs = [x + tx for x in xs]
ys = [y + ty for y in ys]
output_strokes.append((xs, ys))
return output_strokes
def strokes_to_image(strokes, image_size):
"""
"""
image = np.zeros((image_size, image_size), dtype=np.uint8)
for xs, ys in strokes:
for i in range(1, len(xs)):
rr, cc = skimage.draw.line(ys[i-1], xs[i-1], ys[i], xs[i])
image[rr, cc] = 255
return image
def strokes_to_points(strokes):
"""
"""
num_points = sum([len(xs) for xs, ys in strokes])
points = np.zeros((num_points, 3), dtype=np.float32)
base = 0
for xs, ys in strokes:
for i in range(len(xs)):
points[base + i, 0] = xs[i]
points[base + i, 1] = ys[i]
base += len(xs)
points[base - 1, 2] = 1.0
return points.flatten().tostring()
def int64_feature(v):
"""
create a feature which contains a 64-bits integer
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[v]))
def image_feature(image):
"""
    create a feature which contains the image encoded as uint8 bytes.
"""
image = image.astype(np.uint8).tostring()
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[image]))
def draw_to_complex_example(draw, image_size, add_perturbation):
"""
"""
strokes = draw[1]
if add_perturbation:
pass
strokes_uniform = normalize_strokes_to_uniform(strokes)
strokes_image = normalize_strokes_to_image(strokes, image_size)
points = strokes_to_points(strokes_uniform)
image = strokes_to_image(strokes_image, image_size)
feature = {}
feature['keyid'] = int64_feature(draw[0])
feature['strokes'] = \
tf.train.Feature(bytes_list=tf.train.BytesList(value=[points]))
feature['image'] = image_feature(image)
if len(draw) > 2:
feature['label'] = int64_feature(draw[2])
return tf.train.Example(features=tf.train.Features(feature=feature))
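# A minimal sketch of how a single raw drawing flows through the helpers above,
# assuming the (keyid, strokes, label) tuple layout produced by
# columns_to_strokes_and_label; the values are made up.
def _draw_to_example_demo():
    draw = (42, [([0, 10, 20], [0, 5, 10]), ([5, 15], [2, 8])], 3)
    example = draw_to_complex_example(draw, image_size=32, add_perturbation=False)
    # 'strokes' holds the flattened (x, y, end_of_stroke) float32 points and
    # 'image' the rasterised 32x32 uint8 bitmap.
    return example.features.feature['keyid'].int64_list.value[0]  # -> 42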
def columns_to_strokes_and_label(
columns, index_keyid, index_strokes, index_label):
"""
"""
keyid = int(columns[index_keyid])
strokes = json.loads(columns[index_strokes])
if index_label >= 0:
label = columns[index_label].replace(' ', '_')
label = dataset.label_to_index[label]
return (keyid, strokes, label)
else:
return (keyid, strokes,)
def preprocess(description):
"""
"""
columns_to_draw = functools.partial(
columns_to_strokes_and_label,
index_keyid=description['index_keyid'],
index_strokes=description['index_strokes'],
index_label=description['index_label'])
draws = []
for source_path in description['source_paths']:
with open(source_path, newline='') as csv_file:
csv_draws = csv.reader(csv_file, delimiter=',')
draws.extend([columns_to_draw(columns) for columns in csv_draws])
random.shuffle(draws)
draw_to_example = functools.partial(
draw_to_complex_example,
image_size=description['image_size'],
add_perturbation=description['add_perturbation'])
    # NOTE: convert draws to examples and write the TFRecord
with tf.python_io.TFRecordWriter(description['result_path']) as writer:
for draw in draws:
example = draw_to_example(draw)
writer.write(example.SerializeToString())
print('done: {}'.format(description['result_path']))
def collect_source_paths():
"""
    assumes CSV files with the same label reside in the same directory
"""
FLAGS = tf.app.flags.FLAGS
source_paths_collection = []
for dir_path, dir_names, file_names in os.walk(FLAGS.source_dir):
if len(file_names) == 0:
continue
# NOTE: only csv which is raw data
source_names = [n for n in file_names if n.endswith('.csv')]
source_paths = [os.path.join(dir_path, n) for n in source_names]
source_paths_collection.append(source_paths)
random.shuffle(source_paths_collection)
return source_paths_collection
def preprocess_training():
"""
"""
FLAGS = tf.app.flags.FLAGS
source_paths_collection = collect_source_paths()
    # NOTE: currently, instead of reusing data, we drop the remainder here
num_output = min([len(ps) for ps in source_paths_collection])
with multiprocessing.dummy.Pool(32) as pool:
for i in range(0, num_output, 32):
descriptions = []
for index in range(i, min(i + 32, num_output)):
# NOTE: build result name with prefix and index
result_name = '{}_{:0>4}.tfrecord'.format(FLAGS.prefix, index)
result_path = os.path.join(FLAGS.result_dir, result_name)
# NOTE: mix source from each categories
source_paths = [ps[index] for ps in source_paths_collection]
# NOTE: build args for one task
description = {
'source_paths': source_paths,
'result_path': result_path,
'image_size': FLAGS.image_size,
'index_keyid': FLAGS.index_keyid,
'index_label': FLAGS.index_label,
'index_strokes': FLAGS.index_strokes,
'add_perturbation': FLAGS.add_perturbation,
}
descriptions.append(description)
pool.map(preprocess, descriptions)
def preprocess_testing():
"""
"""
FLAGS = tf.app.flags.FLAGS
columns_to_draw = functools.partial(
columns_to_strokes_and_label,
index_keyid=FLAGS.index_keyid,
index_strokes=FLAGS.index_strokes,
index_label=FLAGS.index_label)
with open(FLAGS.source_csv_path, newline='') as csv_file:
csv_draws = csv.reader(csv_file, delimiter=',')
draws = [columns_to_draw(columns) for columns in csv_draws]
draw_to_example = functools.partial(
draw_to_complex_example,
image_size=FLAGS.image_size,
add_perturbation=FLAGS.add_perturbation)
    # NOTE: convert draws to examples and write the TFRecord
with tf.python_io.TFRecordWriter(FLAGS.result_tfr_path) as writer:
for draw in draws:
example = draw_to_example(draw)
writer.write(example.SerializeToString())
def main(_):
"""
"""
FLAGS = tf.app.flags.FLAGS
if FLAGS.source_dir is not None and tf.gfile.Exists(FLAGS.source_dir):
preprocess_training()
if FLAGS.source_csv_path is not None and tf.gfile.Exists(FLAGS.source_csv_path):
preprocess_testing()
if __name__ == '__main__':
# NOTE: to handle single csv (test.csv, no shuffle)
tf.app.flags.DEFINE_string('source_csv_path', None, '')
tf.app.flags.DEFINE_string('result_tfr_path', None, '')
tf.app.flags.DEFINE_string('source_dir', None, '')
tf.app.flags.DEFINE_string('result_dir', None, '')
tf.app.flags.DEFINE_string('prefix', '', '')
tf.app.flags.DEFINE_boolean('add_perturbation', False, '')
tf.app.flags.DEFINE_integer('image_size', 32, '')
tf.app.flags.DEFINE_integer('index_keyid', -1, '')
tf.app.flags.DEFINE_integer('index_label', 5, '')
tf.app.flags.DEFINE_integer('index_strokes', 1, '')
tf.app.run()
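# Example invocations with a placeholder script name and paths; the flags map to
# the DEFINE_* calls above:
#
#   python preprocess.py --source_dir=/data/quickdraw/csv \
#       --result_dir=/data/quickdraw/tfrecords --prefix=train --image_size=32
#
#   python preprocess.py --source_csv_path=/data/quickdraw/test.csv \
#       --result_tfr_path=/data/quickdraw/test.tfrecord --index_label=-1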
|
the-stack_0_7531 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import io
import tarfile
import numpy as np
from PIL import Image
import paddle
from paddle.io import Dataset
from paddle.dataset.common import _check_exists_and_download
__all__ = ["VOC2012"]
VOC_URL = 'https://dataset.bj.bcebos.com/voc/VOCtrainval_11-May-2012.tar'
VOC_MD5 = '6cd6e144f989b92b3379bac3b3de84fd'
SET_FILE = 'VOCdevkit/VOC2012/ImageSets/Segmentation/{}.txt'
DATA_FILE = 'VOCdevkit/VOC2012/JPEGImages/{}.jpg'
LABEL_FILE = 'VOCdevkit/VOC2012/SegmentationClass/{}.png'
CACHE_DIR = 'voc2012'
MODE_FLAG_MAP = {'train': 'trainval', 'test': 'train', 'valid': "val"}
class VOC2012(Dataset):
"""
Implementation of `VOC2012 <http://host.robots.ox.ac.uk/pascal/VOC/voc2012/>`_ dataset
To speed up the download, we put the data on https://dataset.bj.bcebos.com/voc/VOCtrainval_11-May-2012.tar.
    The original data can be downloaded from http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar.
Args:
data_file(str): path to data file, can be set None if
:attr:`download` is True. Default None, default data path: ~/.cache/paddle/dataset/voc2012
mode(str): 'train', 'valid' or 'test' mode. Default 'train'.
download(bool): download dataset automatically if :attr:`data_file` is None. Default True
backend(str, optional): Specifies which type of image to be returned:
PIL.Image or numpy.ndarray. Should be one of {'pil', 'cv2'}.
            If this option is not set, will get backend from ``paddle.vision.get_image_backend``,
default backend is 'pil'. Default: None.
Examples:
.. code-block:: python
import paddle
from paddle.vision.datasets import VOC2012
from paddle.vision.transforms import Normalize
class SimpleNet(paddle.nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
def forward(self, image, label):
return paddle.sum(image), label
normalize = Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5],
data_format='HWC')
voc2012 = VOC2012(mode='train', transform=normalize, backend='cv2')
for i in range(10):
image, label= voc2012[i]
image = paddle.cast(paddle.to_tensor(image), 'float32')
label = paddle.to_tensor(label)
model = SimpleNet()
image, label= model(image, label)
print(image.numpy().shape, label.numpy().shape)
"""
def __init__(self,
data_file=None,
mode='train',
transform=None,
download=True,
backend=None):
assert mode.lower() in ['train', 'valid', 'test'], \
"mode should be 'train', 'valid' or 'test', but got {}".format(mode)
if backend is None:
backend = paddle.vision.get_image_backend()
if backend not in ['pil', 'cv2']:
            raise ValueError(
                "Expected backend to be one of ['pil', 'cv2'], but got {}"
                .format(backend))
self.backend = backend
self.flag = MODE_FLAG_MAP[mode.lower()]
self.data_file = data_file
if self.data_file is None:
assert download, "data_file is not set and downloading automatically is disabled"
self.data_file = _check_exists_and_download(
data_file, VOC_URL, VOC_MD5, CACHE_DIR, download)
self.transform = transform
# read dataset into memory
self._load_anno()
self.dtype = paddle.get_default_dtype()
def _load_anno(self):
self.name2mem = {}
self.data_tar = tarfile.open(self.data_file)
for ele in self.data_tar.getmembers():
self.name2mem[ele.name] = ele
set_file = SET_FILE.format(self.flag)
sets = self.data_tar.extractfile(self.name2mem[set_file])
self.data = []
self.labels = []
for line in sets:
line = line.strip()
data = DATA_FILE.format(line.decode('utf-8'))
label = LABEL_FILE.format(line.decode('utf-8'))
self.data.append(data)
self.labels.append(label)
def __getitem__(self, idx):
data_file = self.data[idx]
label_file = self.labels[idx]
data = self.data_tar.extractfile(self.name2mem[data_file]).read()
label = self.data_tar.extractfile(self.name2mem[label_file]).read()
data = Image.open(io.BytesIO(data))
label = Image.open(io.BytesIO(label))
if self.backend == 'cv2':
data = np.array(data)
label = np.array(label)
if self.transform is not None:
data = self.transform(data)
if self.backend == 'cv2':
return data.astype(self.dtype), label.astype(self.dtype)
return data, label
def __len__(self):
return len(self.data)
def __del__(self):
if self.data_tar:
self.data_tar.close()
|
the-stack_0_7532 | from . import configurations
from .. import exit, Path, save_configuration, save_weights, load_weights, load_configuration, handle_init, npprod
from tensorflow import data as tfdata, optimizers as tfoptimizers, reshape as tfreshape
from third_party.tensorflow.building.handler import Layer_Handler
from third_party.tensorflow.train.training_functions import tf_training_loop
from third_party.tensorflow.train import optimization, loss_functions
class Model:
def __init__(self, conf_name):
# Initialize weight and bias
self.weights = {}
self.bias = {}
self.layer_handler = Layer_Handler()
# Call initialization handler
handle_init(self, conf_name, configurations)
def save(self):
# Saves model weight variables
w = self.weights
b = self.bias
path = Path('models/NeuralNetworks/saved_models/')
if not path.exists():
path.mkdir()
path = save_weights(w, b, path.joinpath(
self.conf_class_name[0],
self.conf_class_name[1],
self.conf_name)
)
save_configuration(self.c, self.conf_name, path)
def load(self, path):
#Load weight variables
weights, biases = load_weights(path)
#print(type(weights))
self.weights = {}
self.bias = {}
for layer_name in weights.item():
self.weights[layer_name] = weights.item().get(layer_name)
self.bias[layer_name] = biases.item().get(layer_name)
def train(self,
datasets,
batch_size,
epochs,
learning_rate,
loss_function='cross_entropy_w_sigmoid',
optimization_function='classifier',
debug=False
):
if isinstance(datasets, tuple):
train, validate = datasets
else:
train = datasets
validate = None
#Define optimizer
optimizer = tfoptimizers.Adam(learning_rate)
#Define loss function
loss_function = getattr(loss_functions, loss_function)
#Define optimization function
if 'autoencoder' == optimization_function:
optimization_function = 'classifier'
autoencoder = True
else:
autoencoder = False
opt = getattr(optimization, optimization_function)
#Dataset operations
#Batch
if batch_size != 0:
train = train.batch(batch_size, drop_remainder=False)
else:
train = train.batch(1)
#Start training
tf_training_loop(
train,
validate,
self,
loss_function,
opt,
optimizer,
epochs,
True,
autoencoder=autoencoder,
debug=False
)
if not debug:
self.save()
print("Training finished...")
def handle_layers(self, x, config, name_specifier='', training=False):
# Clear specifier if the layer is on the main configuration (not encoder or decoder)
if name_specifier == 'main':
name_specifier = ''
for i, (layer_type, conf) in enumerate(config.items()):
layer_name = name_specifier+'_'+layer_type+'_'+str(i)
inputs = [x, self.weights, self.bias, conf, layer_name, self.c['data_type'], training]
if layer_type in dir(self.layer_handler):
x = getattr(self.layer_handler, layer_type)(*inputs)
# Handle RNNs
if isinstance(x, tuple):
x, states, outputs = x
else:
print("Layer type: ", layer_type, " was not found...")
exit()
return x
    def encoder(self, x, training=False):
        if 'encoder' in list(self.c.keys()):
            return self.handle_layers(
                x,
                self.c['encoder']['layers'],
                'encoder',
                training
            )
        else:
            print("Model does not have a defined encoder part...")
            exit()
    def decoder(self, x, training=False):
        if 'decoder' in list(self.c.keys()):
            return self.handle_layers(
                x,
                self.c['decoder']['layers'],
                'decoder',
                training
            )
        else:
            print("Model does not have a defined decoder part...")
            exit()
def run(self, x, training=False):
fed_input_shape = x.shape
# Reshape output
if x.shape[1:] != self.c['input_shape']:
in_shape = self.c['input_shape'].copy()
in_shape.insert(0, -1)
if npprod(x.shape[1:]) == npprod(in_shape[1:]):
x = tfreshape(x, in_shape)
# Get all layer configurations in a list
if not hasattr(self, 'layer_confs'):
layer_confs = {}
for key, c in list(self.c.items()):
if key == 'layers':
layer_confs['main'] = self.c['layers']
elif isinstance(c, dict):
if 'layers' in list(c.keys()):
layer_confs[key] = c['layers']
self.layer_confs = layer_confs
# Handle layer building and feeding through the model
for name, layer_conf in self.layer_confs.items():
x = self.handle_layers(x, layer_conf, name, training)
# Reshape output
if x.shape != fed_input_shape:
if npprod(x.shape[1:]) == npprod(fed_input_shape[1:]):
x = tfreshape(x, fed_input_shape)
return x
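# Typical lifecycle sketch, assuming a configuration named 'example_conf' exists
# in `configurations` and `train_ds` is a tf.data.Dataset of (input, target)
# pairs; both names are placeholders:
#
#   model = Model('example_conf')
#   model.train(train_ds, batch_size=32, epochs=10, learning_rate=1e-3)
#   outputs = model.run(inputs, training=False)
#
# `train` saves the weights via `save()` unless debug=True, and `load(path)`
# restores a previously saved weight file.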
|
the-stack_0_7533 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux.
JRC Biomass Project.
Unit D1 Bioeconomy.
"""
# Built-in modules #
import os
# Third party modules #
from tqdm import tqdm
import pandas
# First party modules #
from autopaths.auto_paths import AutoPaths
# Internal modules #
from libcbm_runner import libcbm_data_dir
from libcbm_runner.core.continent import continent
from libcbm_runner.pump.pre_processor import PreProcessor
# Constants #
interface_dir = libcbm_data_dir + 'interface/'
###############################################################################
class MakeActivities(object):
"""
This class will change the structure of the input data directory for
each country, and set it up for the new features containing both
activities, scenarios and combinations of the latter.
More information is contained in the notebooks of the `bioeconomy_notes`
repository. An example directory structure is the following:
ZZ
├── activities
│ ├── afforestation
│ │ ├── events.csv
│ │ ├── growth_curves.csv
│ │ ├── inventory.csv
│ │ └── transitions.csv
│ ├── deforestation
│ │ └── events.csv
│ ├── mgmt
│ │ ├── events.csv
│ │ ├── growth_curves.csv
│ │ ├── inventory.csv
│ │ └── transitions.csv
│ ├── nd_nsr
│ │ ├── events.csv
│ │ └── transitions.csv
│ └── nd_sr
│ ├── events_wide.csv
│ └── transitions.csv
├── common
│ ├── age_classes.csv
│ ├── classifiers.csv
│ └── disturbance_types.csv
├── orig
│ ├── aidb.db -> libcbm_aidb/countries/ZZ/orig/config/aidb.db
│ └── associations.csv
└── silv
├── product_types.csv
└── silvicultural_practices.csv
"""
#------------------------------ File lists -------------------------------#
common_list = ['disturbance_types.csv', 'classifiers.csv',
'age_classes.csv']
silv_list = ['product_types.csv', 'silvicultural_practices.csv']
config_list = ['associations.csv', 'aidb.db']
mgmt_list = ['events.csv', 'inventory.csv', 'transitions.csv',
'growth_curves.csv']
activities = ['afforestation', 'deforestation', 'mgmt', 'nd_nsr', 'nd_sr']
#------------------------------ Autopaths --------------------------------#
old_all_paths = """
/orig/
/orig/csv/
/orig/csv/age_classes.csv
/orig/csv/classifiers.csv
/orig/csv/disturbance_types.csv
/orig/csv/events.csv
/orig/csv/inventory.csv
/orig/csv/transitions.csv
/orig/csv/growth_curves.csv
/orig/config/
/orig/config/associations.csv
/orig/config/aidb.db
"""
new_all_paths = """
/common/
/common/age_classes.csv
/common/classifiers.csv
/common/disturbance_types.csv
/activities/
/activities/mgmt/events.csv
/activities/mgmt/inventory.csv
/activities/mgmt/transitions.csv
/activities/mgmt/growth_curves.csv
/config/
/config/associations.csv
/config/aidb.db
/silv/
/silv/product_types.csv
/silv/silvicultural_practices.csv
"""
#--------------------------- Special Methods -----------------------------#
def __repr__(self):
return '%s object code "%s"' % (self.__class__, self.country.iso2_code)
def __init__(self, country):
# Default attributes #
self.country = country
# AutoPaths #
self.old_paths = AutoPaths(self.country.data_dir, self.old_all_paths)
self.new_paths = AutoPaths(self.country.data_dir, self.new_all_paths)
def __call__(self):
# Move existing files
self.move_stuff()
# Create empty files for all possible activities #
self.create_activities()
# Switch events files to the wide format #
self.make_events_wide()
# Add the scenario column to every file #
self.add_scen_column()
# Fix the transitions file #
self.restore_header()
#------------------------------- Methods ---------------------------------#
def move_stuff(self):
# Common #
for item in self.common_list:
self.old_paths[item].move_to(self.new_paths[item])
# Silv #
for item in self.silv_list:
self.new_paths[item].touch()
# Config #
for item in self.config_list:
self.old_paths[item].move_to(self.new_paths[item])
# Mgmt #
for item in self.mgmt_list:
self.old_paths[item].move_to(self.new_paths[item])
# Remove old directories #
self.country.data_dir.remove_empty_dirs()
def create_activities(self):
# Other activities #
for act in self.activities:
if act == 'mgmt': continue
for item in self.mgmt_list:
directory = self.new_paths.activities_dir + act + '/'
directory.create_if_not_exists()
file = directory + item
file.touch()
def make_events_wide(self):
# The path to the mgmt events file #
path = self.new_paths.events
# Read it #
long = pandas.read_csv(str(path))
# Get a pre-processor #
pre_proc = PreProcessor(type('X', (), {'country': self.country}))
# Transform it #
wide = pre_proc.events_long_to_wide(long)
# Write it #
wide.to_csv(str(path), index=False)
# Return #
return str(path)
def add_scen_column(self):
# LU was already done previously #
if self.country.iso2_code == 'LU': return
# The files #
files_to_be_modify = ['growth_curves',
'transitions',
'inventory']
# Create the four dynamic files #
for input_file in files_to_be_modify:
# The path to the file that we will modify #
path = self.new_paths[input_file]
# Read the file #
df = pandas.read_csv(str(path))
# Add column #
df.insert(0, 'scenario', 'reference')
# Write output #
df.to_csv(str(path), index=False, float_format='%g')
def restore_header(self):
"""
In a pandas dataframe, the column names have to be unique, because
they are implemented as an index. However in the file
"transition_rules.csv", column names are repeated.
So we have to restore these headers afterwards.
"""
# Read from disk #
header = self.new_paths.transitions.first
# Modify #
header = header.split(',')
header = [n.replace('.1', '') for n in header]
header = ','.join(header)
# Write to disk #
self.new_paths.transitions.remove_first_line()
self.new_paths.transitions.prepend(header)
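    # Why the rename above is needed: pandas deduplicates repeated column names
    # on read, so a header such as "status,forest_type,status,forest_type"
    # (hypothetical names) comes back as "status,forest_type,status.1,forest_type.1".
    # A round trip of the cleanup:
    #
    #   header = "status,forest_type,status.1,forest_type.1"
    #   cleaned = ','.join(n.replace('.1', '') for n in header.split(','))
    #   # cleaned == "status,forest_type,status,forest_type"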
#------------------------- The flat symlinks -----------------------------#
@property
def country_interface_dir(self):
return interface_dir + self.country.iso2_code + '/'
@property
def interface_base(self):
return self.country_interface_dir + self.country.iso2_code + '_'
def make_interface(self, hardlinks=True, debug=False):
"""
This method can create symlinks to the input files in a flat hierarchy,
in essence providing a user interface to the input data.
This was originally developed to be compatible with Excel. The Excel
software has the ridiculous limitation of not being able to open two
files with the same name.
Moreover, in the case of windows, symbolic links don't overcome this
limitation and Excel still complains when it opens two symbolic links that
point to different files.
This issue is not fixed by using hard links instead of symbolic links.
This is because Excel never modifies a given file. When saving, it creates
a temporary file in the same directory, then deletes the original file and
renames the temporary file to the name of the original file. This destroys
the hard links upon every save operation.
"""
# Create the directory #
self.country_interface_dir.create_if_not_exists()
# Same case for all of: "Common, Silv, Config" #
for item in self.common_list + self.silv_list + self.config_list:
file = self.new_paths[item]
dest = self.interface_base + 'config_' + file.name
dest.remove()
if debug: print(str(file), " -> ", str(dest))
if hardlinks: os.link(str(file), str(dest))
else: file.link_to(dest)
# Different case for "Activities" #
for subdir in self.new_paths.activities_dir.flat_directories:
act = subdir.name
for file in subdir.flat_files:
dest = self.interface_base + act + '_' + file.name
dest.remove()
if debug: print(str(file), " -> ", str(dest))
if hardlinks: os.link(str(file), str(dest))
else: file.link_to(dest)
# Return #
return self.interface_base
#------------------------ Copying files back -----------------------------#
def save_interface(self, debug=False):
"""
In the end, the only way to make this `interface` work is to have a script
copy every file in the flat hierarchy back to its expected place within
the `libcbm_data` repository.
"""
# Same case for all of: "Common, Silv, Config" #
for item in self.common_list + self.silv_list + self.config_list:
file = self.new_paths[item]
source = self.interface_base + 'config_' + file.name
if debug: print(str(source), " -> ", str(file))
source.copy_to(file)
# Different case for "Activities" #
for subdir in self.new_paths.activities_dir.flat_directories:
act = subdir.name
for file in subdir.flat_files:
source = self.interface_base + act + '_' + file.name
if debug: print(str(source), " -> ", str(file))
source.copy_to(file)
# Return #
return self.interface_base
###############################################################################
makers = [MakeActivities(c) for c in continent]
if __name__ == '__main__': print([maker() for maker in tqdm(makers)])
|
the-stack_0_7534 | from lib.hosts import get_host_id
from lib.network import send_json
from train.client.config import get_model_type
from train.events import events
async def register_host_id(websocket):
"""
Sends a hostID registration event on the provided websocket.
"""
host_id = get_host_id()
await send_json(websocket, events.START_REGISTRATION, {"hostID": host_id, "modelType": get_model_type()})
async def request_route(websocket):
"""
Sends a routeID request event on the provided websocket.
"""
await send_json(websocket, events.ROUTE_REQUEST)
async def upload_performance_metrics(websocket, metrics):
"""
Uploads performance metrics for the current model to the websocket.
"""
await send_json(websocket, events.METRICS_UPLOAD, {'metrics': metrics})
async def upload_best_parameter_set(websocket, result):
"""
Uploads the best parameter set found for the current model to the websocket.
"""
await send_json(websocket, events.PARAMETER_SET_UPLOAD, result)
async def complete_route(websocket, route_id):
"""
Sends a route completion request to the websocket provided.
"""
await send_json(websocket, events.ROUTE_COMPLETE, {'route_id': route_id})
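# Hypothetical usage sketch (assumes a websockets-style connection object; not part of this module):
# async with websockets.connect(uri) as ws:
#     await register_host_id(ws)
#     await request_route(ws)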
|
the-stack_0_7539 | from damster.utils import initialize_logger
import re
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from pprint import pformat
log = initialize_logger(__name__)
def _sanitize_url(url):
u = urlparse(url)
return u.netloc.split(':')[0].replace('.', '_')
class BambooBuildAgentsMetrics(object):
AGENTS_BUILDING = re.compile(r'(?P<online>\d+)(?: of (?P<busy>\d+))? online agents(?: building)?')
def __init__(self, bamboo_client, influx_client):
self.cli = bamboo_client
self.influx = influx_client
def agent_status(self, metric_name):
try:
agents = self.cli.agent_status()
log.debug('Agent Status:\n{}'.format(pformat(agents)))
metrics = list()
for agent in agents:
metrics.append(dict(
measurement=metric_name,
tags=dict(
agent_name=agent['name'].replace(' ', '_'),
type=agent['type'],
busy=agent['busy'],
enabled=agent['enabled'],
active=agent['active'],
),
fields=dict(
value=1
)
))
self.influx.write_points(metrics)
except ValueError as e:
log.error('Agent Status threw a ValueError: {}'.format(e))
except Exception as e:
log.error('Agent Status threw an Exception: {}'.format(e))
def activity(self, metric_name, tags=None):
try:
activity = self.cli.activity()
log.debug('Build Activity:\n{}'.format(pformat(activity)))
building = [b for b in activity['builds'] if b['status'] == 'BUILDING']
queued = [b for b in activity['builds'] if b['status'] == 'QUEUED']
total_building = len(building)
local_building = len([b for b in building if b['agent']['type'] == 'LOCAL'])
remote_building = len([b for b in building if b['agent']['type'] == 'REMOTE'])
total_queued = len(queued)
try:
search = re.search(self.AGENTS_BUILDING, activity['agentSummary'])
agents_building = int(search.groupdict('0')['busy'])
agents_online = int(search.groupdict('0')['online'])
except AttributeError:
log.error('Error parsing agentSummary')
agents_building = 0
agents_online = 0
metric = dict(
measurement=metric_name,
tags=dict(
host=_sanitize_url(self.cli.url)
),
fields=dict(
total_building=total_building,
local_building=local_building,
remote_building=remote_building,
queued=total_queued,
agents_building=agents_building,
agents_online=agents_online
)
)
if tags:
metric['tags'].update(tags)
log.debug('Metric:\n{}'.format(pformat(metric)))
self.influx.write_points([metric])
except Exception as e:
log.error('Bamboo Activity threw an Exception: {}'.format(e))
|
the-stack_0_7541 | """Example of a highway section network with on/off ramps."""
from flow.envs.highway_ramps_env import HighwayRampsEnv
from flow.envs.multiagent import highway
from flow.controllers import car_following_models
from flow.controllers.car_following_models import IDMController, LACController
from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig
from flow.core.params import SumoCarFollowingParams, SumoLaneChangeParams
from flow.core.params import InFlows, VehicleParams, TrafficLightParams
from flow.networks.highway_ramps_crystal import ADDITIONAL_NET_PARAMS
from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.networks import HighwayRampsNetwork_Crystal
# inflow rates in vehs/hour
TOTAL_FLOW_RATE = 15000
CAV_RATE = 0.2
ZERO_RATE = 0.2
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
# acceleration_controller=(IDMController, {"T":1, "v0":25}),
car_following_params=SumoCarFollowingParams(
speed_mode="no_collide", # for safer behavior at the merges
tau=1.5, # larger distance between cars
decel=4.5,
max_speed=31,
),
lane_change_params=SumoLaneChangeParams(lane_change_mode=1621),
color = "white"
)
vehicles.add(
veh_id="cav_zero",
# acceleration_controller=(LACController, {}),
car_following_params=SumoCarFollowingParams(
speed_mode="obey_safe_speed", # for safer behavior at the merges
tau=0.3, # larger distance between cars
accel=3.5,
decel=6,
sigma=0.1,
min_gap=1,
max_speed=40,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode=1621,
lc_speed_gain=3),
vClass="hov",
color = "red"
)
vehicles.add(
veh_id="cav",
# acceleration_controller=(LACController, {}),
car_following_params=SumoCarFollowingParams(
speed_mode="no_collide", # for safer behavior at the merges
tau=0.8, # larger distance between cars
decel=4.5,
sigma=0.1,
min_gap=2,
max_speed=31
),
lane_change_params=SumoLaneChangeParams(lane_change_mode=1621),
# vClass="hov",
color = "yellow"
)
additional_net_params = ADDITIONAL_NET_PARAMS.copy()
additional_net_params["next_off_ramp_proba"] = 0.1
ON_RAMPS_INFLOW_RATE = TOTAL_FLOW_RATE * additional_net_params["next_off_ramp_proba"]
HIGHWAY_INFLOW_RATE = TOTAL_FLOW_RATE - ON_RAMPS_INFLOW_RATE
# lengths
additional_net_params["highway_length"] = 8000
additional_net_params["on_ramps_length"] = 300
additional_net_params["off_ramps_length"] = 300
# number of lanes
additional_net_params["highway_lanes"] = 4
additional_net_params["on_ramps_lanes"] = 1
additional_net_params["off_ramps_lanes"] = 1
# speed limits
additional_net_params["highway_speed"] = 30
additional_net_params["on_ramps_speed"] = 20
additional_net_params["off_ramps_speed"] = 20
# ramps
additional_net_params["on_ramps_pos"] = [500, 3000, 5500]
additional_net_params["off_ramps_pos"] = [2500, 5000, 7500]
# zero-occupancy lane
additional_net_params["zero_lanes"] = 0
assert additional_net_params["zero_lanes"]<additional_net_params["highway_lanes"]
additional_net_params["highway_zero_car_following"] = dict()
for i in range(additional_net_params["highway_lanes"]):
if i < additional_net_params["highway_lanes"] - additional_net_params["zero_lanes"]:
additional_net_params["highway_zero_car_following"][str(i)] = {"T": 1, "v0": 20}
else:
additional_net_params["highway_zero_car_following"][str(i)] = {"T": 0.5, "v0": 40}
additional_net_params["ramps_zero_car_following"] = {"T": 1, "v0": 20}
# additional_net_params["allow"] = dict()
# for i in range(additional_net_params["highway_lanes"]):
# if i < additional_net_params["zero_occupancy_lanes"]:
# additional_net_params["allow"][str(i)] = "cav_zero"
# else:
# additional_net_params["allow"][str(i)] = "all"
inflows = InFlows()
inflows.add(
veh_type="human",
edge="highway_0",
vehs_per_hour=(1-CAV_RATE)*HIGHWAY_INFLOW_RATE,
depart_lane="allowed",
depart_speed="max",
name="highway_human")
inflows.add(
veh_type="cav",
edge="highway_0",
vehs_per_hour=CAV_RATE*(1-ZERO_RATE)*HIGHWAY_INFLOW_RATE,
depart_lane="allowed",
depart_speed="max",
name="highway_cav")
inflows.add(
veh_type="cav_zero",
edge="highway_0",
vehs_per_hour=CAV_RATE*ZERO_RATE*HIGHWAY_INFLOW_RATE,
depart_lane="allowed",
depart_speed="max",
name="highway_zero")
for i in range(len(additional_net_params["on_ramps_pos"])):
inflows.add(
veh_type="human",
edge="on_ramp_{}".format(i),
vehs_per_hour=(1-CAV_RATE)*ON_RAMPS_INFLOW_RATE,
depart_lane="first",
depart_speed="max",
name="on_ramp_human")
inflows.add(
veh_type="cav",
edge="on_ramp_{}".format(i),
vehs_per_hour=CAV_RATE*(1-ZERO_RATE)*ON_RAMPS_INFLOW_RATE,
depart_lane="first",
depart_speed="max",
name="on_ramp_cav")
inflows.add(
veh_type="cav_zero",
edge="on_ramp_{}".format(i),
vehs_per_hour=CAV_RATE*ZERO_RATE*ON_RAMPS_INFLOW_RATE,
depart_lane="first",
depart_speed="max",
name="on_ramp_zero")
flow_params = dict(
# name of the experiment
exp_tag='highway-ramp',
# name of the flow environment the experiment is running on
env_name=HighwayRampsEnv,
# name of the network class the experiment is running on
network=HighwayRampsNetwork_Crystal,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
render=True,
emission_path="/home/cwang717/git/flow/output/crystal/sc33",
sim_step=0.1,
restart_instance=True,
minigap_factor = 0
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
additional_params=ADDITIONAL_ENV_PARAMS,
horizon=3000,
sims_per_step=1,
warmup_steps=3000
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
inflows=inflows,
additional_params=additional_net_params
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(),
# traffic lights to be introduced to specific nodes (see
# flow.core.params.TrafficLightParams)
tls=TrafficLightParams(),
)
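# Illustrative way to run this configuration (assumes flow's standard Experiment API):
# from flow.core.experiment import Experiment
# exp = Experiment(flow_params)
# exp.run(num_runs=1)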
|
the-stack_0_7544 | import os
from jobControl import jobControl
from pyspark.sql import SparkSession
from pyspark.sql import functions as f
from pyspark.sql.types import DecimalType, IntegerType
from utils import arg_utils, dataframe_utils, date_utils
job_args = arg_utils.get_job_args()
job_name = os.path.basename(__file__).split(".")[0]
num_partitions = 6
jobExec = jobControl.Job(job_name, job_args)
jobExec.target_schema = (
jobExec.target_schema if jobExec.target_schema else jobExec.database_edw
)
def main():
table_columns = dataframe_utils.return_hive_table_columns(
spark, jobExec.target_schema, jobExec.target_table
)
# Reading source table
udf_quarter_last_date = f.udf(
lambda x, y: date_utils.quarter_last_date(x, y), IntegerType()
)
df_currency_rates = (
spark.table("{}.currency_rates".format(jobExec.database_replica_full))
.select(
"id",
"to_currency_id",
"from_currency_id",
"considered_at",
f.col("rate").cast(DecimalType(16, 8)),
)
.filter(f.col("from_currency_id") == 840)
)
df_currencies = spark.table(
"{}.currencies".format(jobExec.database_replica_full)
).select("id", "short_title")
df_countries = (
spark.table("{}.countries".format(jobExec.database_replica_full))
.select(
"id",
"title",
f.upper(f.col("short_title").substr(1, 150)).alias("country_short_title"),
"default_currency_id",
"enabled",
)
.filter(f.col("enabled") == 1)
)
# Transform
df_dim_currencies_quarter = (
df_currency_rates.withColumn(
"date_id",
udf_quarter_last_date(
df_currency_rates["considered_at"],
f.quarter(df_currency_rates["considered_at"]),
),
)
.join(
df_currencies,
df_currencies["id"] == df_currency_rates["to_currency_id"],
"inner",
)
.join(
df_countries,
df_countries["default_currency_id"] == df_currencies["id"],
"inner",
)
.groupBy(
"date_id",
df_currency_rates["from_currency_id"],
df_currency_rates["to_currency_id"],
df_currencies["short_title"],
df_countries["country_short_title"],
)
.agg(
f.max(df_countries["id"]).alias("country_id"),
f.max(df_countries["title"]).alias("country_title"),
f.max(df_countries["country_short_title"]),
f.avg(1 / df_currency_rates["rate"]).alias("usd_rate"),
f.avg(df_currency_rates["rate"]).alias("usd_value"),
)
.select(
f.lit("quarter").alias("time_period"),
"date_id",
"country_id",
"country_title",
"country_short_title",
df_currency_rates["to_currency_id"].alias("currency_id"),
df_currencies["short_title"].alias("currency_title"),
"usd_rate",
"usd_value",
)
)
df_dim_currencies_month = (
df_currency_rates.withColumn(
"date_id",
(
f.concat(
f.date_format(df_currency_rates["considered_at"], "yyyyMM"),
f.lit("01"),
)
).cast(IntegerType()),
)
.join(
df_currencies,
df_currencies["id"] == df_currency_rates["to_currency_id"],
"inner",
)
.join(
df_countries,
df_countries["default_currency_id"] == df_currencies["id"],
"inner",
)
.groupBy(
"date_id",
df_currency_rates["from_currency_id"],
df_currency_rates["to_currency_id"],
df_currencies["short_title"],
df_countries["country_short_title"],
)
.agg(
f.max(df_countries["id"]).alias("country_id"),
f.max(df_countries["title"]).alias("country_title"),
f.max(df_countries["country_short_title"]),
f.avg(1 / df_currency_rates["rate"]).alias("usd_rate"),
f.avg(df_currency_rates["rate"]).alias("usd_value"),
)
.select(
f.lit("month").alias("time_period"),
"date_id",
"country_id",
"country_title",
"country_short_title",
df_currency_rates["to_currency_id"].alias("currency_id"),
df_currencies["short_title"].alias("currency_title"),
"usd_rate",
"usd_value",
)
)
df_currency_rates_month_end = (
df_currency_rates.withColumn(
"date_id",
f.date_format(f.col("considered_at"), "yyyyMMdd").cast(IntegerType()),
)
.filter(
f.col("date_id")
== (
f.date_format(
f.last_day(df_currency_rates["considered_at"]), "yyyyMMdd"
).cast(IntegerType())
)
)
.groupBy(
"date_id",
df_currency_rates["to_currency_id"],
df_currency_rates["from_currency_id"],
)
.agg(f.max(f.col("id")).alias("id"))
.select(f.col("id"), f.col("date_id"))
)
df_dim_currencies_month_end = (
df_currency_rates_month_end.join(
df_currency_rates,
df_currency_rates_month_end["id"] == df_currency_rates["id"],
"inner",
)
.join(
df_currencies,
df_currencies["id"] == df_currency_rates["to_currency_id"],
"inner",
)
.join(
df_countries,
df_countries["default_currency_id"] == df_currencies["id"],
"inner",
)
.select(
f.lit("month_end").alias("time_period"),
df_currency_rates_month_end["date_id"],
df_countries["id"].alias("country_id"),
df_countries["title"].alias("country_title"),
df_countries["country_short_title"],
df_currency_rates["to_currency_id"].alias("currency_id"),
df_currencies["short_title"].alias("currency_title"),
(1 / df_currency_rates["rate"]).alias("usd_rate"),
df_currency_rates["rate"].alias("usd_value"),
)
)
df_dim_currencies = df_dim_currencies_quarter.union(df_dim_currencies_month).union(
df_dim_currencies_month_end
)
df_dim_currencies = jobExec.select_dataframe_columns(
spark, df_dim_currencies, table_columns
)
df_dim_currencies = df_dim_currencies.repartition(num_partitions, "date_id")
df_dim_currencies.write.insertInto(
"{}.{}".format(jobExec.target_schema, jobExec.target_table),
overwrite=True,
)
jobExec.totalLines = (
spark.table("{}.{}".format(jobExec.target_schema, jobExec.target_table))
).count()
if __name__ == "__main__":
spark = (
SparkSession.builder.appName(job_name)
.config("spark.sql.parquet.writeLegacyFormat", "true")
.enableHiveSupport()
.getOrCreate()
)
jobExec.execJob(main, spark, add_hive_path=True, delete_excessive_files=True)
|
the-stack_0_7546 | """Module containing tools to calculate statistics of collective events.
Example:
>>> from arcos4py.tools import calcCollevStats
>>> test = calcCollevStats()
>>> out = test.calculate(data = data,frame_column = "frame", collid_column = "collid")
"""
from typing import Union
import numpy as np
import pandas as pd
class calcCollevStats:
"""Class to calculate statistics of collective events."""
def __init__(self) -> None:
"""Class to calculate statistics of collective events."""
pass
def _calculate_duration_size_group(self, data: np.ndarray) -> np.ndarray:
"""Calculates duration and size for the collective event in the dataframe.
Arguments:
data (np.ndarray): Containing a single collective event.
Returns:
np.ndarray: Array containing the collective event id, duration, tot_size, min_size,
max_size, start_frame, end_frame, first_frame_centroid and last_frame_centroid
of the current collective event.
"""
coll_dur = max(data[:, 0]) - min(data[:, 0]) + 1
coll_total_size = np.unique(data[:, 1]).size
(unique, counts) = np.unique(data[:, 0], return_counts=True)
frequencies = np.asarray((unique, counts)).T
coll_min_size = np.min(frequencies[:, 1])
coll_max_size = np.max(frequencies[:, 1])
coll_start_frame = np.min(data[:, 0])
coll_end_frame = np.max(data[:, 0])
if data.shape[1] > 3:
coll_start_coord = np.mean(data[(data[:, 0] == coll_start_frame)][:, 3:], axis=0)
coll_end_coord = np.mean(data[(data[:, 0] == coll_end_frame)][:, 3:], axis=0)
else:
coll_start_coord = np.nan
coll_end_coord = np.nan
d = np.array(
[
data[0, 2],
coll_dur,
coll_total_size,
coll_min_size,
coll_max_size,
coll_start_frame,
coll_end_frame,
coll_start_coord,
coll_end_coord,
],
dtype=object,
)
return d
def _get_collev_duration(
self,
data: pd.DataFrame,
frame_column: str,
collev_id: str,
obj_id_column: str,
posCol: Union[list, None],
) -> pd.DataFrame:
"""Applies self._calculate_duration_size_group() to every group\
i.e. every collective event.
Arguments:
data (DataFrame): Containing unfiltered collective events.
collev_id (str): Indicating the contained collective id column.
frame_column (str): Indicating the contained frame column.
obj_id_column (str): Indicating object id.
posCol (list | None): Contains names of position columns. If None, coordinates of
start and end frame are not calculated.
Returns:
DataFrame: DataFrame containing "collid", "duration", "total_size",
"min_size","max_size", "start_frame", "end_frame",
"first_frame_centroid" and "last_frame_centroid"
of all collective events.
"""
cols = [
collev_id,
"duration",
"total_size",
"min_size",
"max_size",
"start_frame",
"end_frame",
"first_frame_centroid",
"last_frame_centroid",
]
subset = [frame_column, obj_id_column, collev_id]
if posCol:
subset.extend(posCol)
# if object id cannot be converted to a float, generate unique labels with pd.factorize
try:
data_np = data[subset].to_numpy(dtype=np.float64)
except ValueError:
labels, levels = pd.factorize(data[obj_id_column])
new_obj_id = f'{obj_id_column}_labels'
data[new_obj_id] = labels
subset[1] = new_obj_id
data_np = data[subset].to_numpy(dtype=np.float64)
data_np = data_np[~np.isnan(data_np).any(axis=1)]
data_np_sorted = data_np[data_np[:, 2].argsort()]
grouped_array = np.split(data_np_sorted, np.unique(data_np_sorted[:, 2], axis=0, return_index=True)[1][1:])
# map to grouped_array
out = map(self._calculate_duration_size_group, grouped_array)
out_list = [i for i in out]
df = pd.DataFrame(out_list, columns=cols)
return df
def calculate(
self,
data: pd.DataFrame,
frame_column: str,
collid_column: str,
obj_id_column: str,
posCol: Union[list, None] = None,
) -> pd.DataFrame:
"""Calculate statistics of collective events.
Arguments:
data (DataFrame): Containing collective events.
frame_column (str): Indicating the frame column in data.
collid_column (str): Indicating the collective event id column in data.
obj_id_column (str): Indicating object id.
posCol (list | None): Contains names of position columns. If None, coordinates of
start and end frame are not calculated.
Returns:
DataFrame: DataFrame containing "collid", "duration", "total_size",
"min_size","max_size", "start_frame", "end_frame",
"first_frame_centroid" and "last_frame_centroid"
of all collective events.
"""
if data.empty:
return data
colev_stats = self._get_collev_duration(data, frame_column, collid_column, obj_id_column, posCol)
return colev_stats
|
the-stack_0_7548 | """Utilities for managing string types and encoding."""
import six
def force_unicode(string, encoding='utf-8'):
"""Force a given string to be a unicode type.
Args:
string (bytes or unicode):
The string to enforce.
Returns:
unicode:
The string as a unicode type.
Raises:
ValueError:
The given string was not a supported type.
"""
if isinstance(string, six.text_type):
return string
elif isinstance(string, bytes):
return string.decode(encoding)
else:
raise ValueError('Provided string was neither bytes nor unicode')
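# Minimal usage sketch (assumed inputs):
# force_unicode(b'caf\xc3\xa9')   # -> 'café'
# force_unicode('already text')   # returned unchanged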
|
the-stack_0_7549 | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="funnelarea.title.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
|
the-stack_0_7550 | import builtins
import dataclasses
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, cast
from pydantic import BaseModel
from pydantic.fields import ModelField
from strawberry.arguments import UNSET
from strawberry.experimental.pydantic.conversion import (
convert_pydantic_model_to_strawberry_class,
)
from strawberry.experimental.pydantic.fields import get_basic_type
from strawberry.field import StrawberryField
from strawberry.object_type import _process_type, _wrap_dataclass
from strawberry.private import Private
from strawberry.types.type_resolver import _get_fields
from strawberry.types.types import FederationTypeParams, TypeDefinition
from .exceptions import MissingFieldsListError, UnregisteredTypeException
def replace_pydantic_types(type_: Any):
if hasattr(type_, "__args__"):
new_type = type_.copy_with(
tuple(replace_pydantic_types(t) for t in type_.__args__)
)
if isinstance(new_type, TypeDefinition):
# TODO: Not sure if this is necessary. No coverage in tests
# TODO: Unnecessary with StrawberryObject
new_type = builtins.type(
new_type.name,
(),
{"_type_definition": new_type},
)
return new_type
if issubclass(type_, BaseModel):
if hasattr(type_, "_strawberry_type"):
return type_._strawberry_type
else:
raise UnregisteredTypeException(type_)
return type_
def get_type_for_field(field: ModelField):
type_ = field.outer_type_
type_ = get_basic_type(type_)
type_ = replace_pydantic_types(type_)
if not field.required:
type_ = Optional[type_]
return type_
def _get_private_fields(cls: Type) -> List[dataclasses.Field]:
private_fields: List[dataclasses.Field] = []
for field in dataclasses.fields(cls):
if isinstance(field.type, Private):
private_fields.append(field)
return private_fields
def type(
model: Type[BaseModel],
*,
fields: List[str],
name: Optional[str] = None,
is_input: bool = False,
is_interface: bool = False,
description: Optional[str] = None,
federation: Optional[FederationTypeParams] = None,
):
def wrap(cls):
if not fields:
raise MissingFieldsListError(model)
model_fields = model.__fields__
fields_set = set(fields)
all_fields: List[Tuple[str, Any, dataclasses.Field]] = [
(
name,
get_type_for_field(field),
StrawberryField(
python_name=field.name,
graphql_name=field.alias if field.has_alias else None,
default=field.default if not field.required else UNSET,
default_factory=(
field.default_factory if field.default_factory else UNSET
),
type_annotation=get_type_for_field(field),
),
)
for name, field in model_fields.items()
if name in fields_set
]
wrapped = _wrap_dataclass(cls)
extra_fields = cast(List[dataclasses.Field], _get_fields(wrapped))
private_fields = _get_private_fields(wrapped)
all_fields.extend(
(
(
field.name,
field.type,
field,
)
for field in extra_fields + private_fields
)
)
# Sort fields so that fields with missing defaults go first
# because dataclasses require that fields with no defaults are defined
# first
missing_default = []
has_default = []
for field in all_fields:
if field[2].default is dataclasses.MISSING:
missing_default.append(field)
else:
has_default.append(field)
sorted_fields = missing_default + has_default
cls = dataclasses.make_dataclass(
cls.__name__,
sorted_fields,
)
_process_type(
cls,
name=name,
is_input=is_input,
is_interface=is_interface,
description=description,
federation=federation,
)
model._strawberry_type = cls # type: ignore
cls._pydantic_type = model # type: ignore
def from_pydantic(instance: Any, extra: Dict[str, Any] = None) -> Any:
return convert_pydantic_model_to_strawberry_class(
cls=cls, model_instance=instance, extra=extra
)
def to_pydantic(self) -> Any:
instance_kwargs = dataclasses.asdict(self)
return model(**instance_kwargs)
cls.from_pydantic = staticmethod(from_pydantic)
cls.to_pydantic = to_pydantic
return cls
return wrap
input = partial(type, is_input=True)
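# Hypothetical usage sketch (model and field names are illustrative only):
# class UserModel(BaseModel):
#     id: int
#     name: str
#
# @type(model=UserModel, fields=["id", "name"])
# class UserType:
#     pass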
|
the-stack_0_7551 | import numpy as np
import matplotlib.pylab as plt
class AnimAcross:
'''
Helper class for making subplots,
useful if you're not quite sure how many
subplots you'll be needing
'''
def __init__(self,ratio=.8,sz=4,columns=None,aa=None,asp=1.0):
self.aa=aa
self.axes_list=[]
self.cbs={}
self.ratio=ratio
self.sz=sz
self.columns=columns
self.asp=asp
def __enter__(self):
if self.aa is not None:
return self.aa
else:
return self
def __pos__(self):
self.axes_list.append(
plt.gcf().add_axes(
[0,0,self.ratio,self.ratio],
label="axis%d"%len(self.axes_list),
projection='3d',
))
def __invert__(self):
self.axes_list.append(plt.gcf().add_axes([0,0,self.ratio,self.ratio],label="axis%d"%len(self.axes_list)))
def __neg__(self):
self.axes_list.append(plt.gcf().add_axes([0,0,self.ratio,self.ratio],label="axis%d"%len(self.axes_list)))
plt.axis('off')
def __call__(self,s,*args,**kwargs):
~self
plt.title(s,*args,**kwargs)
def cb(self,mappable,idx=None):
if idx is None:
idx = len(self.axes_list)-1
self.cbs[idx] = mappable
def __exit__(self,exc_type,exc_val,exc_tb):
if self.aa is not None:
return
if self.columns is None:
dims=[
(1,1), # no plots
(1,1), # 1 plot
(1,2), # 2 plots
(1,3), # 3 plots
(2,2), # 4 plots
(2,3), # 5 plots
(2,3), # 6 plots
(3,3), # 7 plots
(3,3), # 8 plots
(3,3), # 9 plots
(4,4)
]
if len(self.axes_list)<len(dims):
dims=dims[len(self.axes_list)]
else:
cols=int(np.sqrt(len(self.axes_list)))+1
rows = len(self.axes_list)//cols + 1
dims=(rows,cols)
else:
cols=self.columns
if len(self.axes_list)%cols==0:
rows=len(self.axes_list)//cols
else:
rows=len(self.axes_list)//cols + 1
dims=(rows,cols)
k=0
for j in range(dims[0]):
for i in range(dims[1]):
if k<len(self.axes_list):
self.axes_list[k].set_position((i,dims[0]-j-1,self.ratio,self.ratio))
k=k+1
plt.gcf().set_size_inches(self.sz,self.sz*self.asp)
for i in range(len(self.axes_list)):
if i in self.cbs:
plt.colorbar(mappable=self.cbs[i],ax=self.axes_list[i])
if exc_type is not None:
print(exc_type,exc_val,exc_tb)
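# Minimal usage sketch (assumed): calling the instance opens a new subplot and titles it.
# with AnimAcross(columns=2) as aa:
#     aa("first panel"); plt.plot([0, 1], [0, 1])
#     aa("second panel"); plt.imshow(np.random.rand(4, 4))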
|
the-stack_0_7552 | """Module for the StreamRoles cog."""
# Copyright (c) 2017-2018 Tobotimus
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import contextlib
import logging
from typing import List, Optional, Tuple, Union
import discord
from redbot.core import Config, checks, commands
from redbot.core.bot import Red
from redbot.core.utils import chat_formatting as chatutils, menus, predicates
from .types import FilterList
log = logging.getLogger("red.streamroles")
UNIQUE_ID = 0x923476AF
_alerts_channel_sentinel = object()
class StreamRoles(commands.Cog):
"""Give current twitch streamers in your server a role."""
# Set using [p]eval or something rather and the streamrole will be assigned simply
# whenever someone is streaming, regardless of whether or not they have a linked
# Twitch account. Makes for easier black-box testing.
DEBUG_MODE = False
def __init__(self, bot: Red):
super().__init__()
self.bot: Red = bot
self.conf = Config.get_conf(self, force_registration=True, identifier=UNIQUE_ID)
self.conf.register_guild(
streamer_role=None,
game_whitelist=[],
mode=str(FilterList.blacklist),
alerts__enabled=False,
alerts__channel=None,
alerts__autodelete=True,
)
self.conf.register_member(
blacklisted=False, whitelisted=False, alert_messages={}
)
self.conf.register_role(blacklisted=False, whitelisted=False)
async def initialize(self) -> None:
"""Initialize the cog."""
for guild in self.bot.guilds:
await self._update_guild(guild)
@checks.admin_or_permissions(manage_roles=True)
@commands.guild_only()
@commands.group(autohelp=True, aliases=["streamroles"])
async def streamrole(self, ctx: commands.Context):
"""Manage settings for StreamRoles."""
pass
@streamrole.command()
async def setmode(self, ctx: commands.Context, *, mode: FilterList):
"""Set the user filter mode to blacklist or whitelist."""
await self.conf.guild(ctx.guild).mode.set(str(mode))
await self._update_guild(ctx.guild)
await ctx.tick()
@streamrole.group(autohelp=True)
async def whitelist(self, ctx: commands.Context):
"""Manage the whitelist."""
pass
@whitelist.command(name="add")
async def white_add(
self,
ctx: commands.Context,
*,
user_or_role: Union[discord.Member, discord.Role],
):
"""Add a member or role to the whitelist."""
await self._update_filter_list_entry(user_or_role, FilterList.whitelist, True)
await ctx.tick()
@whitelist.command(name="remove")
async def white_remove(
self,
ctx: commands.Context,
*,
user_or_role: Union[discord.Member, discord.Role],
):
"""Remove a member or role from the whitelist."""
await self._update_filter_list_entry(user_or_role, FilterList.whitelist, False)
await ctx.tick()
@checks.bot_has_permissions(embed_links=True)
@whitelist.command(name="show")
async def white_show(self, ctx: commands.Context):
"""Show the whitelisted members and roles in this server."""
members, roles = await self._get_filter_list(ctx.guild, FilterList.whitelist)
if not (members or roles):
await ctx.send("The whitelist is empty.")
return
embed = discord.Embed(
title="StreamRoles Whitelist", colour=await ctx.embed_colour()
)
if members:
embed.add_field(name="Members", value="\n".join(map(str, members)))
if roles:
embed.add_field(name="Roles", value="\n".join(map(str, roles)))
await ctx.send(embed=embed)
@streamrole.group(autohelp=True)
async def blacklist(self, ctx: commands.Context):
"""Manage the blacklist."""
pass
@blacklist.command(name="add")
async def black_add(
self,
ctx: commands.Context,
*,
user_or_role: Union[discord.Member, discord.Role],
):
"""Add a member or role to the blacklist."""
await self._update_filter_list_entry(user_or_role, FilterList.blacklist, True)
await ctx.tick()
@blacklist.command(name="remove")
async def black_remove(
self,
ctx: commands.Context,
*,
user_or_role: Union[discord.Member, discord.Role],
):
"""Remove a member or role from the blacklist."""
await self._update_filter_list_entry(user_or_role, FilterList.blacklist, False)
await ctx.tick()
@checks.bot_has_permissions(embed_links=True)
@blacklist.command(name="show")
async def black_show(self, ctx: commands.Context):
"""Show the blacklisted members and roles in this server."""
members, roles = await self._get_filter_list(ctx.guild, FilterList.blacklist)
if not (members or roles):
await ctx.send("The blacklist is empty.")
return
embed = discord.Embed(
title="StreamRoles Blacklist", colour=await ctx.embed_colour()
)
if members:
embed.add_field(name="Members", value="\n".join(map(str, members)))
if roles:
embed.add_field(name="Roles", value="\n".join(map(str, roles)))
await ctx.send(embed=embed)
@streamrole.group(autohelp=True)
async def games(self, ctx: commands.Context):
"""Manage the game whitelist.
Adding games to the whitelist will make the bot only add the streamrole
to members streaming those games. If the game whitelist is empty, the
game being streamed won't be checked before adding the streamrole.
"""
pass
@games.command(name="add")
async def games_add(self, ctx: commands.Context, *, game: str):
"""Add a game to the game whitelist.
This should *exactly* match the name of the game being played
by the streamer as shown in Discord or on Twitch.
"""
async with self.conf.guild(ctx.guild).game_whitelist() as whitelist:
whitelist.append(game)
await self._update_guild(ctx.guild)
await ctx.tick()
@games.command(name="remove")
async def games_remove(self, ctx: commands.Context, *, game: str):
"""Remove a game from the game whitelist."""
async with self.conf.guild(ctx.guild).game_whitelist() as whitelist:
try:
whitelist.remove(game)
except ValueError:
await ctx.send("That game is not in the whitelist.")
return
await self._update_guild(ctx.guild)
await ctx.tick()
@checks.bot_has_permissions(embed_links=True)
@games.command(name="show")
async def games_show(self, ctx: commands.Context):
"""Show the game whitelist for this server."""
whitelist = await self.conf.guild(ctx.guild).game_whitelist()
if not whitelist:
await ctx.send("The game whitelist is empty.")
return
embed = discord.Embed(
title="StreamRoles Game Whitelist",
description="\n".join(whitelist),
colour=await ctx.embed_colour(),
)
await ctx.send(embed=embed)
@games.command(name="clear")
async def games_clear(self, ctx: commands.Context):
"""Clear the game whitelist for this server."""
msg = await ctx.send(
"This will clear the game whitelist for this server. "
"Are you sure you want to do this?"
)
menus.start_adding_reactions(msg, predicates.ReactionPredicate.YES_OR_NO_EMOJIS)
pred = predicates.ReactionPredicate.yes_or_no(msg)
try:
message = await ctx.bot.wait_for("reaction_add", check=pred)
except asyncio.TimeoutError:
message = None
if message is not None and pred.result is True:
await self.conf.guild(ctx.guild).game_whitelist.clear()
await self._update_guild(ctx.guild)
await ctx.send("Done. The game whitelist has been cleared.")
else:
await ctx.send("The action was cancelled.")
@streamrole.group()
async def alerts(self, ctx: commands.Context):
"""Manage streamalerts for those who receive the streamrole."""
@alerts.command(name="setenabled")
async def alerts_setenabled(self, ctx: commands.Context, true_or_false: bool):
"""Enable or disable streamrole alerts."""
await self.conf.guild(ctx.guild).alerts.enabled.set(true_or_false)
await ctx.tick()
@alerts.command(name="setchannel")
async def alerts_setchannel(
self, ctx: commands.Context, channel: discord.TextChannel
):
"""Set the channel for streamrole alerts."""
await self.conf.guild(ctx.guild).alerts.channel.set(channel.id)
await ctx.tick()
@alerts.command(name="autodelete")
async def alerts_autodelete(self, ctx: commands.Context, true_or_false: bool):
"""Enable or disable alert autodeletion.
This is enabled by default. When enabled, alerts will be deleted
once the streamer's role is removed.
"""
await self.conf.guild(ctx.guild).alerts.autodelete.set(true_or_false)
await ctx.tick()
async def _get_filter_list(
self, guild: discord.Guild, mode: FilterList
) -> Tuple[List[discord.Member], List[discord.Role]]:
all_member_data = await self.conf.all_members(guild)
all_role_data = await self.conf.all_roles()
mode = mode.as_participle()
member_ids = (u for u, d in all_member_data.items() if d.get(mode))
role_ids = (u for u, d in all_role_data.items() if d.get(mode))
members = list(filter(None, map(guild.get_member, member_ids)))
roles = list(filter(None, map(guild.get_role, role_ids)))
return members, roles
async def _update_filter_list_entry(
self,
member_or_role: Union[discord.Member, discord.Role],
filter_list: FilterList,
value: bool,
) -> None:
if isinstance(member_or_role, discord.Member):
await self.conf.member(member_or_role).set_raw(
filter_list.as_participle(), value=value
)
await self._update_member(member_or_role)
else:
await self.conf.role(member_or_role).set_raw(
filter_list.as_participle(), value=value
)
await self._update_members_with_role(member_or_role)
@streamrole.command()
async def setrole(self, ctx: commands.Context, *, role: discord.Role):
"""Set the role which is given to streamers."""
await self.conf.guild(ctx.guild).streamer_role.set(role.id)
await ctx.send(
"Done. Streamers will now be given the {} role when "
"they go live.".format(role.name)
)
async def get_streamer_role(self, guild: discord.Guild) -> Optional[discord.Role]:
"""Get the streamrole for this guild.
Arguments
---------
guild : discord.Guild
The guild to retrieve the streamer role for.
Returns
-------
Optional[discord.Role]
The role given to streaming users in this guild. ``None``
if not set.
"""
role_id = await self.conf.guild(guild).streamer_role()
if not role_id:
return
try:
role = next(r for r in guild.roles if r.id == role_id)
except StopIteration:
return
else:
return role
async def get_alerts_channel(
self, guild: discord.Guild
) -> Optional[discord.TextChannel]:
"""Get the alerts channel for this guild.
Arguments
---------
guild : discord.Guild
The guild to retrieve the alerts channel for.
Returns
-------
Optional[discord.TextChannel]
The channel where alerts are posted in this guild. ``None``
if not set or enabled.
"""
alerts_data = await self.conf.guild(guild).alerts.all()
if not alerts_data["enabled"]:
return
return guild.get_channel(alerts_data["channel"])
async def _update_member(
self,
member: discord.Member,
role: Optional[discord.Role] = None,
alerts_channel: Optional[discord.TextChannel] = _alerts_channel_sentinel,
) -> None:
role = role or await self.get_streamer_role(member.guild)
if role is None:
return
channel = (
alerts_channel
if alerts_channel is not _alerts_channel_sentinel
else await self.get_alerts_channel(member.guild)
)
activity = next(
(
a
for a in member.activities
if a and a.type == discord.ActivityType.streaming
),
None,
)
if not (self.DEBUG_MODE or getattr(activity, "twitch_name", None)):
activity = None
has_role = role in member.roles
if activity and await self._is_allowed(member):
game = getattr(activity, "details", None)
games = await self.conf.guild(member.guild).game_whitelist()
if not games or game in games:
if not has_role:
log.debug("Adding streamrole %s to member %s", role.id, member.id)
await member.add_roles(role)
if channel:
await self._post_alert(member, activity, game, channel)
return
if has_role:
log.debug("Removing streamrole %s from member %s", role.id, member.id)
await member.remove_roles(role)
if channel and await self.conf.guild(member.guild).alerts.autodelete():
await self._remove_alert(member, channel)
async def _update_members_with_role(self, role: discord.Role) -> None:
streamer_role = await self.get_streamer_role(role.guild)
if streamer_role is None:
return
alerts_channel = await self.get_alerts_channel(role.guild)
if await self.conf.guild(role.guild).mode() == FilterList.blacklist:
for member in role.members:
if streamer_role in member.roles:
log.debug(
"Removing streamrole %s from member %s after role %s was "
"blacklisted",
streamer_role.id,
member.id,
role.id,
)
await member.remove_roles(
streamer_role,
reason=f"Removing streamrole after {role} role was blacklisted",
)
else:
for member in role.members:
await self._update_member(member, streamer_role, alerts_channel)
async def _update_guild(self, guild: discord.Guild) -> None:
streamer_role = await self.get_streamer_role(guild)
if streamer_role is None:
return
alerts_channel = await self.get_alerts_channel(guild)
for member in guild.members:
await self._update_member(member, streamer_role, alerts_channel)
async def _post_alert(
self,
member: discord.Member,
activity: discord.Activity,
game: Optional[str],
channel: discord.TextChannel,
) -> discord.Message:
content = f"{chatutils.bold(member.display_name)} is now live on Twitch"
if game is not None:
content += f", playing {chatutils.italics(str(game))}"
content += f":\n\n{chatutils.italics(activity.name)}\n\n{activity.url}"
msg = await channel.send(content)
await self.conf.member(member).alert_messages.set_raw(
str(channel.id), value=msg.id
)
return msg
async def _remove_alert(
self, member: discord.Member, channel: discord.TextChannel
) -> None:
conf_group = self.conf.member(member).alert_messages
msg_id = await conf_group.get_raw(str(channel.id), default=None)
if msg_id is None:
return
await conf_group.clear_raw(str(channel.id))
msg: Optional[discord.Message] = discord.utils.get(
getattr(self.bot, "cached_messages", ()), id=msg_id
)
if msg is None:
try:
msg = await channel.fetch_message(msg_id)
except discord.NotFound:
return
with contextlib.suppress(discord.NotFound):
await msg.delete()
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild) -> None:
"""Update any members in a new guild."""
await self._update_guild(guild)
@commands.Cog.listener()
async def on_member_update(
self, before: discord.Member, after: discord.Member
) -> None:
"""Apply or remove streamrole when a user's activity changes."""
if before.activity != after.activity:
await self._update_member(after)
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member) -> None:
"""Update a new member who joins."""
await self._update_member(member)
async def _is_allowed(self, member: discord.Member) -> bool:
if await self.conf.guild(member.guild).mode() == FilterList.blacklist:
return not await self._is_blacklisted(member)
else:
return await self._is_whitelisted(member)
async def _is_whitelisted(self, member: discord.Member) -> bool:
if await self.conf.member(member).whitelisted():
return True
for role in member.roles:
if await self.conf.role(role).whitelisted():
return True
return False
async def _is_blacklisted(self, member: discord.Member) -> bool:
if await self.conf.member(member).blacklisted():
return True
for role in member.roles:
if await self.conf.role(role).blacklisted():
return True
return False
|
the-stack_0_7553 | """
Pyinvoke tasks.py file for automating releases and admin stuff.
Author: Shyue Ping Ong
"""
from invoke import task
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
import datetime
from monty.os import cd
NEW_VER = datetime.datetime.today().strftime("%Y.%-m.%-d")
@task
def make_doc(ctx):
"""
Generate API documentation + run Sphinx.
:param ctx:
"""
# ctx.run("cp README.rst api-docs-source/index.rst")
ctx.run("cp CHANGES.rst api-docs-source/changelog.rst")
with cd("api-docs-source"):
ctx.run("rm maml.*.rst", warn=True)
ctx.run("sphinx-apidoc --separate -P -d 7 -o . -f ../maml")
ctx.run("rm maml*.tests.*rst", warn=True)
for f in glob.glob("maml*.rst"):
newoutput = []
with open(f, 'r') as fid:
for line in fid:
if re.search(r"maml.*\._.*", line):
continue
else:
newoutput.append(line)
with open(f, 'w') as fid:
fid.write("".join(newoutput))
ctx.run("rm maml*._*.rst")
ctx.run("rm -r docs", warn=True)
ctx.run("sphinx-build -b html api-docs-source docs")
# ctx.run("cp _static/* ../docs/html/_static", warn=True)
with cd("docs"):
ctx.run("rm -r .doctrees", warn=True)
# This makes sure maml.ai works to redirect to the Github page
ctx.run("echo \"maml.ai\" > CNAME")
# Avoid the use of jekyll so that _dir works as intended.
ctx.run("touch .nojekyll")
@task
def update_doc(ctx):
"""
Update the web documentation.
:param ctx:
"""
ctx.run("cp README.rst docs/")
ctx.run("cp api-docs-source/conf.py docs/conf.py")
make_doc(ctx)
ctx.run("git add .")
ctx.run("git commit -a -m \"Update docs\"")
ctx.run("git push")
@task
def publish(ctx):
"""
Upload release to Pypi using twine.
:param ctx:
"""
ctx.run("rm dist/*.*", warn=True)
ctx.run("python setup.py sdist bdist_wheel")
ctx.run("twine upload dist/*")
@task
def set_ver(ctx):
lines = []
with open("setup.py", "rt") as f:
for l in f:
lines.append(re.sub(r'version=([^,]+),', 'version="%s",' % NEW_VER,
l.rstrip()))
with open("setup.py", "wt") as f:
f.write("\n".join(lines) + "\n")
@task
def release(ctx, notest=True):
set_ver(ctx)
#ctx.run("rm -r dist build maml.egg-info", warn=True)
if not notest:
ctx.run("pytest maml")
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split(r"\-+", contents)
desc = toks[1].strip()
toks = desc.split("\n")
desc = "\n".join(toks[:-1]).strip()
payload = {
"tag_name": "v" + NEW_VER,
"target_commitish": "master",
"name": "v" + NEW_VER,
"body": desc,
"draft": False,
"prerelease": False,
}
response = requests.post(
"https://api.github.com/repos/materialsvirtuallab/maml-app-rfxas/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]},
)
print(response.text)
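# These tasks are run from the command line via pyinvoke, e.g. (assumed invocations):
#   invoke make-doc
#   invoke publish
#   invoke release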
|
the-stack_0_7555 | from keyphrase import KeyphraseModel
from keyphrase import ttypes
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import json
import os
import subprocess
import sys
def write_to_input_file(article_list):
with open(os.path.join(DATA_PATH, INPUT_FILE), "w", encoding='utf-8') as writer:
for article in article_list:
writer.write("%s\n" % json.dumps({"title": article["title"], "text": article["text"], "id": article["id"]}, ensure_ascii=False))
def read_from_output_file():
results = []
with open(os.path.join(DATA_PATH, OUTPUT_FILE), "r", encoding='utf-8') as lines:
for line in lines:
line = line.strip()
if line:
results.append(json.loads(line))
return results
class KeyphrasesHandler(object):
def predict(self, articles):
article_list = [{"id": a.id, "title": a.title, "text": a.text} for a in articles]
write_to_input_file(article_list)
try:
subprocess.check_call(["python", "run_summarization.py", "--data_path=%s" % os.path.join(DATA_PATH, INPUT_FILE), "--decode_only=True"])
except subprocess.CalledProcessError:
return []
decode_results = read_from_output_file()
return [ttypes.Keyphrase(r["id"], r["keyphrases"]) for r in decode_results]
if __name__ == '__main__':
if len(sys.argv) > 1:
HOST = sys.argv[1]
else:
HOST = "192.168.101.4"
if len(sys.argv) > 2:
PORT = sys.argv[2]
else:
PORT = "8084"
DATA_PATH = '/tmp'
INPUT_FILE = 'tmp_input_%s.txt' % PORT
OUTPUT_FILE = 'tmp_output_%s.txt' % PORT
handler = KeyphrasesHandler()
processor = KeyphraseModel.Processor(handler)
transport = TSocket.TServerSocket(HOST, PORT)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
rpcServer = TServer.TSimpleServer(processor,transport, tfactory, pfactory)
print('Starting the rpc server at', HOST,':', PORT)
rpcServer.serve()
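# A client would connect to this service along these lines (illustrative sketch; the
# article struct name and fields are assumed from the handler above, not verified against the IDL):
# transport = TTransport.TBufferedTransport(TSocket.TSocket(HOST, PORT))
# client = KeyphraseModel.Client(TBinaryProtocol.TBinaryProtocol(transport))
# transport.open()
# results = client.predict(articles)  # articles: list of thrift Article-like objects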
|
the-stack_0_7557 | #!/usr/bin/env python
# coding=utf-8
import os
import clara_bootstrap
from distutils.core import setup, Command
from setuptools.command.test import test as TestCommand
from setuptools import find_packages
class ClaraTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
pytest.main(self.test_args)
class ClaraClean(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./.cache ./.eggs ./build ./dist')
os.system('rm -vrf ./*.tgz ./*.egg-info')
os.system('find . -name "*.pyc" -exec rm -vrf {} \;')
os.system('find . -name "__pycache__" -exec rm -rf {} \;')
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme_file:
README = readme_file.read()
with open(os.path.join(os.path.dirname(__file__), 'LICENSE')) as license_file:
LICENSE = license_file.read()
if __name__ == "__main__":
setup(name='clara_bootstrap',
version=clara_bootstrap.__version__,
description='Clara bootstrap scripts',
author='Ricardo Oyarzun',
author_email='[email protected]',
include_package_data=True,
url='https://claraweb.jlab.org',
license=LICENSE,
long_description=README,
test_suite="tests",
tests_require=['pytest'],
cmdclass={
'test': ClaraTest,
'clean': ClaraClean,
},
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*",
"tests", "examples", "examples.*"]),
package_dir={"clara_bootstrap": "clara_bootstrap"},
scripts=['clara_bootstrap/scripts/clara']
)
|
the-stack_0_7559 |
import pandas as pd
import h5py
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
print("Mod imported")
data_case_storage='C:/Users/Arthur/Google Drive/Projets/Autres/OliverWyman/Data/transaction_and_locations_resident.h5'
rev_moy_bord=2259
rev_moy_paris=3417
rev_moy_toul=2047
rev_moy_nice=2018
rev_moy_lille=1821
## Step 4
## Definition of a function reading a table
def read_HDF_file(file_name, table):
with pd.HDFStore(file_name, complevel=9, complib='blosc') as store:
dts = store[table]
print(dts.shape)
#tab = np.array(dts[:])
return dts#,tab
## Calling example: printing the full table /transaction_and_locations (column labels and data)
df=read_HDF_file(data_case_storage,"/transaction_and_locations")
print("data imported")
"""
The goal is to determine which pricing scheme yields the best revenue for Paris; in 2014, the price of one hour of resident parking is 0.065€/h
"""
#revenu_initial=df['amount'].sum()
revenu_initial=11450943.100000003
#info from http://www.leparisien.fr/paris-75/stationnement-a-paris-embouteillage-pour-les-demandes-de-cartes-resident-04-04-2018-7646091.php
n_carte_resident=111165
def repartition_lille_revenu():
#half-day 1€ type a1
#1 day 2€ type a2
#7 days 8€ type a3
#1 month 25€ (not present in the data because the maximum duration is 20h)
a1 = df.loc[(df['duration_hours'] < 10)]
a2 = df.loc[(df['duration_hours'] == 10)]
a3 = df.loc[(df['duration_hours'] > 10)]
print('a1: '+str(len(a1)))
print('a2: '+str(len(a2)))
print('a3: '+str(len(a3)))
print('a1+a2+a3: '+str(len(a3)+len(a2)+len(a1)))
revenu = len(a1)*1 + len(a2)*2+ len(a3)*8
return revenu
def repartition_bordeaux_revenu():
#1€/day type a1
#6€/week type a2
#...
a1 = df.loc[(df['duration_hours'] <= 10)]
a2 = df.loc[(df['duration_hours'] > 10)]
print('a1: '+str(len(a1)))
print('a2: '+str(len(a2)))
print('a1+a2: '+str(len(a2)+len(a1)))
revenu = len(a1)*1 + len(a2)*6
return revenu
def repartition_toulouse_revenu():
#1-year subscription at 135€, which we treat as the general choice
#4€/week
#We only consider the weekly rate, average the time spent across all parking lots, and divide by the number of resident cards
total_temps = df['duration_hours'].sum()
temps_moyen = total_temps/n_carte_resident
nombre_de_semaines_moyen=(temps_moyen/10)/7
revenu=n_carte_resident*135 + 4*nombre_de_semaines_moyen
return revenu
def repartition_nice_revenu():
#Subscription at 10€/year
#1.5€/day type a1
#7€/week type a2
#...
a1 = df.loc[(df['duration_hours'] <= 10)]
a2 = df.loc[(df['duration_hours'] > 10)]
print('a1: '+str(len(a1)))
print('a2: '+str(len(a2)))
print('a1+a2: '+str(len(a2)+len(a1)))
revenu = len(a1)*1.5 + len(a2)*7 + n_carte_resident*10
return revenu
"""
print("Initialement: " +str(revenu_initial))
nice = repartition_nice_revenu()
print("Nice: "+str(nice))
toulouse = repartition_toulouse_revenu()
print("Toulouse: "+str(toulouse))
bordeaux = repartition_bordeaux_revenu()
print("Bordeaux: "+str(bordeaux))
lille = repartition_lille_revenu()
print("Lille: "+str(lille))
paris = revenu_initial
"""
def do_graph():
revenus=(paris, nice, toulouse, bordeaux, lille)
revenus_id=(1,2,3,4,5)
revenus_ponderes=(paris, nice*(rev_moy_paris/rev_moy_nice), toulouse*(rev_moy_paris/rev_moy_toul), bordeaux*(rev_moy_paris/rev_moy_bord), lille*(rev_moy_paris/rev_moy_lille))
ind = np.arange(len(revenus)) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind - width/2, revenus, width, yerr=revenus_id,
color='SkyBlue', label='non pondéré')
rects2 = ax.bar(ind + width/2, revenus_ponderes, width, yerr=revenus_id,
color='IndianRed', label='pondéré')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Revenu')
ax.set_title('Revenu des parkings en fonction de différents systèmes')
ax.set_xticks(ind)
ax.set_xticklabels(('Paris', 'Nice', 'Toulouse', 'Bordeaux', 'Lille'))
ax.legend()
def autolabel(rects, xpos='center'):
"""
Attach a text label above each bar in *rects*, displaying its height.
*xpos* indicates which side to place the text w.r.t. the center of
the bar. It can be one of the following {'center', 'right', 'left'}.
"""
xpos = xpos.lower() # normalize the case of the parameter
ha = {'center': 'center', 'right': 'left', 'left': 'right'}
offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,
'{}'.format(height), ha=ha[xpos], va='bottom')
autolabel(rects1, "left")
autolabel(rects2, "right")
plt.show()
#do_graph()
NV_IDF = 22522
NV_PACA = 19893
NV_NA = 19991.7
NV_OCC = 19457.2
NV_HDF=18812
def do_graph_niveau_de_vie():
revenus=(paris, nice, toulouse, bordeaux, lille)
revenus_id=(1,2,3,4,5)
revenus_ponderes=(paris, nice*(NV_IDF/NV_PACA), toulouse*(NV_IDF/NV_OCC), bordeaux*(NV_IDF/NV_NA), lille*(NV_IDF/NV_HDF))
ind = np.arange(len(revenus)) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind - width/2, revenus, width, yerr=revenus_id,
color='SkyBlue', label='non pondéré')
rects2 = ax.bar(ind + width/2, revenus_ponderes, width, yerr=revenus_id,
color='IndianRed', label='pondéré')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Revenu')
ax.set_title('Revenu des parkings en fonction de différents systèmes')
ax.set_xticks(ind)
ax.set_xticklabels(('Paris', 'Nice', 'Toulouse', 'Bordeaux', 'Lille'))
ax.legend()
def autolabel(rects, xpos='center'):
"""
Attach a text label above each bar in *rects*, displaying its height.
*xpos* indicates which side to place the text w.r.t. the center of
the bar. It can be one of the following {'center', 'right', 'left'}.
"""
xpos = xpos.lower() # normalize the case of the parameter
ha = {'center': 'center', 'right': 'left', 'left': 'right'}
offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,
'{}'.format(height), ha=ha[xpos], va='bottom')
autolabel(rects1, "left")
autolabel(rects2, "right")
plt.show()
#do_graph_niveau_de_vie()
"""
Finally, for residents, the following solution will be put in place:
There will be a subscription whose price decreases with distance from the centre of Paris:
* 60€ for arrondissements 1 to 4
* 50€ for arrondissements 5 to 11
* 40€ for arrondissements 12 and above
The prices will be the average of the Lille, Bordeaux, Toulouse and Nice rates for 1/2 day, 1 day and 1 week, weighted by standard of living,
i.e.:
* 1/2 day: (0.75*NV_IDF/NV_PACA + 1*NV_IDF/NV_HDF + 0.3*NV_IDF/NV_OCC + 0.5*NV_IDF/NV_NA)/4
* 1 day: (1.5*NV_IDF/NV_PACA + 2*NV_IDF/NV_HDF + 0.6*NV_IDF/NV_OCC + 1*NV_IDF/NV_NA)/4
* 1 week: (7*NV_IDF/NV_PACA + 8*NV_IDF/NV_HDF + 4*NV_IDF/NV_OCC + 6*NV_IDF/NV_NA)/4
To determine the subscription revenue, we assume that the number of resident cards is proportional to the counts for each arrondissement
"""
def ratio_by_arr():
"""
id_arr=np.load("C:/Users/Arthur/Google Drive/Projets/Autres/OliverWyman/Data/id_arr.npy")
d={}
for a in id_arr:
d.update({a[0]:a[1]})
avg_by_park_by_hour=np.load("C:/Users/Arthur/Google Drive/Projets/Autres/OliverWyman/Data/avg_by_park_by_hour.npy")
counts_by_arr=[0 for k in range(20)]
for a in avg_by_park_by_hour:
counts_by_arr[d[a[0]]-1]+=sum(a[1:])/12
s=sum(counts_by_arr)
        #Now we also need to take the number of residents into account; to do so, we use the percentage of residents per arrondissement obtained from transactions and locations
data_case_storage_rotatif='C:/Users/Arthur/Google Drive/Projets/Autres/OliverWyman/Data/transaction_and_locations_rotatif.h5'
df_rot =read_HDF_file(data_case_storage_rotatif,"/transaction_and_locations")
"""
    #Now iterate over transaction_and_location_resident and count the number of transactions for each arrondissement
counts_by_arr=[0 for k in range(20)]
for arr in range(20):
counts_by_arr[arr]+=len(df.loc[df['arrondissement']==arr+1])
    #Now compute the corresponding ratios
s=sum(counts_by_arr)
for i in range(len(counts_by_arr)):
counts_by_arr[i]/=s
return counts_by_arr
def revenu_final():
dem_journee=(0.75*NV_IDF/NV_PACA + 1*NV_IDF/NV_HDF + 0.3*NV_IDF/NV_OCC + 0.5*NV_IDF/NV_NA)/4
journee=(1.5*NV_IDF/NV_PACA + 2*NV_IDF/NV_HDF + 0.6*NV_IDF/NV_OCC + 1*NV_IDF/NV_NA)/4
semaine=(7*NV_IDF/NV_PACA + 8*NV_IDF/NV_HDF + 4*NV_IDF/NV_OCC+6*NV_IDF/NV_NA)/4
    #We need to approximate the number of resident cards per arrondissement
r_by_arr=ratio_by_arr()
revenu=0
revenu+=60*n_carte_resident*sum(r_by_arr[0:4])
revenu+=50*n_carte_resident*sum(r_by_arr[4:11])
revenu+=40*n_carte_resident*sum(r_by_arr[11:])
revenu_carte=revenu
print(revenu)
a1 = df.loc[(df['duration_hours'] < 10)]
a2 = df.loc[(df['duration_hours'] == 10)]
a3=df.loc[(df['duration_hours'] > 10)]
revenu += len(a1)*dem_journee + len(a2)*journee + len(a3)*semaine
print(revenu)
    #Now increase the price per vignette
    #The price for each vignette is increased by (vignette number / 5)
revenu_by_vignette=[revenu*0.24/100, revenu*18/100, revenu*43/100, revenu*27/100, revenu*8.75/100, revenu*1.9/100]
for i in range(len(revenu_by_vignette)):
revenu_by_vignette[i]*=1+i/5
return sum(revenu_by_vignette)
def revenu_paris_2018():
    #Subscription at 45€/year
    #1.5€/day for type a1
    #9€/week for type a2
a1 = df.loc[(df['duration_hours'] <= 10)]
a2 = df.loc[(df['duration_hours'] > 10)]
print('a1: '+str(len(a1)))
print('a2: '+str(len(a2)))
print('a1+a2: '+str(len(a2)+len(a1)))
revenu = len(a1)*1.5 + len(a2)*9 + n_carte_resident*45
return revenu
|
the-stack_0_7561 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import paddle.distributed.fleet as fleet
class DNNLayer(nn.Layer):
def __init__(self,
sparse_feature_number,
sparse_feature_dim,
dense_feature_dim,
num_field,
layer_sizes,
sync_mode=None):
super(DNNLayer, self).__init__()
self.sync_mode = sync_mode
self.sparse_feature_number = sparse_feature_number
self.sparse_feature_dim = sparse_feature_dim
self.dense_feature_dim = dense_feature_dim
self.num_field = num_field
self.layer_sizes = layer_sizes
self.embedding = paddle.nn.Embedding(
self.sparse_feature_number,
self.sparse_feature_dim,
sparse=True,
weight_attr=paddle.ParamAttr(
name="SparseFeatFactors",
initializer=paddle.nn.initializer.Uniform()))
sizes = [sparse_feature_dim * num_field + dense_feature_dim
] + self.layer_sizes + [2]
acts = ["relu" for _ in range(len(self.layer_sizes))] + [None]
self._mlp_layers = []
for i in range(len(layer_sizes) + 1):
linear = paddle.nn.Linear(
in_features=sizes[i],
out_features=sizes[i + 1],
weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Normal(
std=1.0 / math.sqrt(sizes[i]))))
self.add_sublayer('linear_%d' % i, linear)
self._mlp_layers.append(linear)
if acts[i] == 'relu':
act = paddle.nn.ReLU()
self.add_sublayer('act_%d' % i, act)
self._mlp_layers.append(act)
def forward(self, sparse_inputs, dense_inputs):
sparse_embs = []
for s_input in sparse_inputs:
if self.sync_mode == "gpubox":
emb = paddle.fluid.contrib.sparse_embedding(
input=s_input,
size=[self.sparse_feature_number, self.sparse_feature_dim],
param_attr=paddle.ParamAttr(name="embedding"))
else:
emb = self.embedding(s_input)
emb = paddle.reshape(emb, shape=[-1, self.sparse_feature_dim])
sparse_embs.append(emb)
y_dnn = paddle.concat(x=sparse_embs + [dense_inputs], axis=1)
for n_layer in self._mlp_layers:
y_dnn = n_layer(y_dnn)
return y_dnn
class StaticModel():
def __init__(self, config):
self.cost = None
self.infer_target_var = None
self.config = config
self._init_hyper_parameters()
self.sync_mode = config.get("runner.sync_mode")
def _init_hyper_parameters(self):
self.is_distributed = False
self.distributed_embedding = False
if self.config.get("hyper_parameters.distributed_embedding", 0) == 1:
self.distributed_embedding = True
self.sparse_feature_number = self.config.get(
"hyper_parameters.sparse_feature_number")
self.sparse_feature_dim = self.config.get(
"hyper_parameters.sparse_feature_dim")
self.sparse_inputs_slots = self.config.get(
"hyper_parameters.sparse_inputs_slots")
self.dense_input_dim = self.config.get(
"hyper_parameters.dense_input_dim")
self.learning_rate = self.config.get(
"hyper_parameters.optimizer.learning_rate")
self.fc_sizes = self.config.get("hyper_parameters.fc_sizes")
def create_feeds(self, is_infer=False):
dense_input = paddle.static.data(
name="dense_input",
shape=[None, self.dense_input_dim],
dtype="float32")
sparse_input_ids = [
paddle.static.data(
name="C" + str(i), shape=[None, 1], dtype="int64")
for i in range(1, self.sparse_inputs_slots)
]
label = paddle.static.data(name="label", shape=[None, 1], dtype="int64")
feeds_list = [label] + sparse_input_ids + [dense_input]
return feeds_list
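    # Feed order produced by create_feeds() above and consumed by net() below:
    #   input[0]                       -> label
    #   input[1:sparse_inputs_slots]   -> sparse id slots C1 .. C(sparse_inputs_slots - 1)
    #   input[-1]                      -> dense_input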
def net(self, input, is_infer=False):
self.label_input = input[0]
self.sparse_inputs = input[1:self.sparse_inputs_slots]
self.dense_input = input[-1]
sparse_number = self.sparse_inputs_slots - 1
dnn_model = DNNLayer(
self.sparse_feature_number,
self.sparse_feature_dim,
self.dense_input_dim,
sparse_number,
self.fc_sizes,
sync_mode=self.sync_mode)
raw_predict_2d = dnn_model.forward(self.sparse_inputs, self.dense_input)
predict_2d = paddle.nn.functional.softmax(raw_predict_2d)
self.predict = predict_2d
auc, batch_auc, [
self.batch_stat_pos, self.batch_stat_neg, self.stat_pos,
self.stat_neg
] = paddle.static.auc(input=self.predict,
label=self.label_input,
num_thresholds=2**12,
slide_steps=20)
self.inference_target_var = auc
if is_infer:
fetch_dict = {'auc': auc}
return fetch_dict
cost = paddle.nn.functional.cross_entropy(
input=raw_predict_2d, label=self.label_input)
avg_cost = paddle.mean(x=cost)
self._cost = avg_cost
fetch_dict = {'cost': avg_cost, 'auc': auc}
return fetch_dict
|
the-stack_0_7564 | """
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class Configuration(CloudFormationLintRule):
"""Check if Outputs are configured correctly"""
id = 'E6001'
shortdesc = 'Outputs have appropriate properties'
description = 'Making sure the outputs are properly configured'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html'
tags = ['outputs']
valid_keys = [
'Value',
'Export',
'Description',
'Condition'
]
def match(self, cfn):
"""Check CloudFormation Outputs"""
matches = list()
outputs = cfn.template.get('Outputs', {})
if outputs:
for output_name, output_value in outputs.items():
for prop in output_value:
if prop not in self.valid_keys:
message = 'Output {0} has invalid property {1}'
matches.append(RuleMatch(
['Outputs', output_name, prop],
message.format(output_name, prop)
))
return matches
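# Illustrative example (assumed template snippet): any output property outside
# valid_keys, e.g. "Foo" below, is reported as "Output <name> has invalid property Foo":
#
#     Outputs:
#       BucketName:
#         Value: !Ref MyBucket
#         Foo: bar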
|
the-stack_0_7565 | #Image Glitch is a program that is designed to make glitch art. It glitches an image by
# sorting the blue value of pixels in each column.
# This program was designed as an introduction to Python for beginners, and is not
# meant to be efficient.
# Because it is inefficient, please use small filesizes with this algorithm.
#
#author: Margo Morton
#version: 11/26/2014
def imageGlitch():
#display filepicker, get chosen image
filename=pickAFile()
pic=makePicture(filename)
#display image
show(pic)
  #sort each column of pixels by blue value to get distortion effect
height=getHeight(pic)
width=getWidth(pic)
for x in range(0,width):
array = []
for y in range(0,height):
p=getPixel(pic,x,y)
#make an array of all pixels in a column
array.append(p)
#for each row, sort the corresponding pixel column
array = sortBlue(array, height)
#update the pixel ordering
for y in range(0,height):
p=getPixel(pic,x,y)
pixColor = makeColor(getRed(array[y]), getGreen(array[y]), getBlue(array[y]))
setColor(p, pixColor)
#update display
repaint(pic)
#sortBlue is a bubblesort algorithm (inefficient, but easy to understand) that
# sorts arrays of pixels based on blue value.
def sortBlue(array, length):
length = length-1
sorted = false
while not sorted:
sorted = true
for i in range(length):
#sort pixels in order of decreasing blue-ness
if getBlue(array[i]) < getBlue(array[i+1]):
sorted = false
#swap two pixels if the former has less blue than the latter
array[i], array[i+1] = array [i+1], array[i]
#returns sorted pixel array
return array
|
the-stack_0_7570 | # -*- coding: utf-8 -*-
# flake8: noqa
import os
import string
import random
import tempfile
import requests
import unittest
import pytest
from qiniu import Auth, set_default, etag, PersistentFop, build_op, op_save, Zone
from qiniu import put_data, put_file, put_stream
from qiniu import BucketManager, build_batch_copy, build_batch_rename, build_batch_move, build_batch_stat, \
build_batch_delete
from qiniu import urlsafe_base64_encode, urlsafe_base64_decode
from qiniu.compat import is_py2, is_py3, b
from qiniu.services.storage.uploader import _form_put
import qiniu.config
if is_py2:
import sys
import StringIO
import urllib
reload(sys)
sys.setdefaultencoding('utf-8')
StringIO = StringIO.StringIO
urlopen = urllib.urlopen
elif is_py3:
import io
import urllib
StringIO = io.StringIO
urlopen = urllib.request.urlopen
access_key = os.getenv('QINIU_ACCESS_KEY')
secret_key = os.getenv('QINIU_SECRET_KEY')
bucket_name = os.getenv('QINIU_TEST_BUCKET')
dummy_access_key = 'abcdefghklmnopq'
dummy_secret_key = '1234567890'
dummy_auth = Auth(dummy_access_key, dummy_secret_key)
def rand_string(length):
lib = string.ascii_uppercase
return ''.join([random.choice(lib) for i in range(0, length)])
def create_temp_file(size):
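    # Seek to size - 1 and write a single byte so the resulting (sparse) temp file is exactly `size` bytes long.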
t = tempfile.mktemp()
f = open(t, 'wb')
f.seek(size - 1)
f.write(b('0'))
f.close()
return t
def remove_temp_file(file):
try:
os.remove(file)
except OSError:
pass
def is_travis():
return os.environ['QINIU_TEST_ENV'] == 'travis'
class UtilsTest(unittest.TestCase):
def test_urlsafe(self):
a = 'hello\x96'
u = urlsafe_base64_encode(a)
assert b(a) == urlsafe_base64_decode(u)
class AuthTestCase(unittest.TestCase):
def test_token(self):
token = dummy_auth.token('test')
assert token == 'abcdefghklmnopq:mSNBTR7uS2crJsyFr2Amwv1LaYg='
def test_token_with_data(self):
token = dummy_auth.token_with_data('test')
assert token == 'abcdefghklmnopq:-jP8eEV9v48MkYiBGs81aDxl60E=:dGVzdA=='
def test_noKey(self):
with pytest.raises(ValueError):
Auth(None, None).token('nokey')
with pytest.raises(ValueError):
Auth('', '').token('nokey')
def test_token_of_request(self):
token = dummy_auth.token_of_request('http://www.qiniu.com?go=1', 'test', '')
assert token == 'abcdefghklmnopq:cFyRVoWrE3IugPIMP5YJFTO-O-Y='
token = dummy_auth.token_of_request('http://www.qiniu.com?go=1', 'test', 'application/x-www-form-urlencoded')
assert token == 'abcdefghklmnopq:svWRNcacOE-YMsc70nuIYdaa1e4='
def test_verify_callback(self):
body = 'name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123'
url = 'test.qiniu.com/callback'
ok = dummy_auth.verify_callback('QBox abcdefghklmnopq:ZWyeM5ljWMRFwuPTPOwQ4RwSto4=', url, body)
assert ok
class BucketTestCase(unittest.TestCase):
q = Auth(access_key, secret_key)
bucket = BucketManager(q)
def test_list(self):
ret, eof, info = self.bucket.list(bucket_name, limit=4)
print(info)
assert eof is False
assert len(ret.get('items')) == 4
ret, eof, info = self.bucket.list(bucket_name, limit=1000)
print(info)
assert eof is True
def test_buckets(self):
ret, info = self.bucket.buckets()
print(info)
assert bucket_name in ret
def test_prefetch(self):
ret, info = self.bucket.prefetch(bucket_name, 'python-sdk.html')
print(info)
assert ret['key'] == 'python-sdk.html'
def test_fetch(self):
ret, info = self.bucket.fetch('http://developer.qiniu.com/docs/v6/sdk/python-sdk.html', bucket_name,
'fetch.html')
print(info)
assert ret['key'] == 'fetch.html'
assert 'hash' in ret
def test_fetch_without_key(self):
ret, info = self.bucket.fetch('http://developer.qiniu.com/docs/v6/sdk/python-sdk.html', bucket_name)
print(info)
assert ret['key'] == ret['hash']
assert 'hash' in ret
def test_stat(self):
ret, info = self.bucket.stat(bucket_name, 'python-sdk.html')
print(info)
assert 'hash' in ret
def test_delete(self):
ret, info = self.bucket.delete(bucket_name, 'del')
print(info)
assert ret is None
assert info.status_code == 612
def test_rename(self):
key = 'renameto' + rand_string(8)
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
key2 = key + 'move'
ret, info = self.bucket.rename(bucket_name, key, key2)
print(info)
assert ret == {}
ret, info = self.bucket.delete(bucket_name, key2)
print(info)
assert ret == {}
def test_copy(self):
key = 'copyto' + rand_string(8)
ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
print(info)
assert ret == {}
ret, info = self.bucket.delete(bucket_name, key)
print(info)
assert ret == {}
def test_change_mime(self):
ret, info = self.bucket.change_mime(bucket_name, 'python-sdk.html', 'text/html')
print(info)
assert ret == {}
def test_change_type(self):
target_key = 'copyto' + rand_string(8)
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, target_key)
ret, info = self.bucket.change_type(bucket_name, target_key, 1)
print(info)
assert ret == {}
ret, info = self.bucket.stat(bucket_name, target_key)
print(info)
assert 'type' in ret
self.bucket.delete(bucket_name, target_key)
def test_copy_force(self):
ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, 'copyfrom', force='true')
print(info)
assert info.status_code == 200
def test_batch_copy(self):
key = 'copyto' + rand_string(8)
ops = build_batch_copy(bucket_name, {'copyfrom': key}, bucket_name)
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
ops = build_batch_delete(bucket_name, [key])
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
def test_batch_copy_force(self):
ops = build_batch_copy(bucket_name, {'copyfrom': 'copyfrom'}, bucket_name, force='true')
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
def test_batch_move(self):
key = 'moveto' + rand_string(8)
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
key2 = key + 'move'
ops = build_batch_move(bucket_name, {key: key2}, bucket_name)
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
ret, info = self.bucket.delete(bucket_name, key2)
print(info)
assert ret == {}
def test_batch_move_force(self):
ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, 'copyfrom', force='true')
print(info)
assert info.status_code == 200
ops = build_batch_move(bucket_name, {'copyfrom': 'copyfrom'}, bucket_name, force='true')
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
def test_batch_rename(self):
key = 'rename' + rand_string(8)
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
key2 = key + 'rename'
ops = build_batch_move(bucket_name, {key: key2}, bucket_name)
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
ret, info = self.bucket.delete(bucket_name, key2)
print(info)
assert ret == {}
def test_batch_rename_force(self):
ret, info = self.bucket.rename(bucket_name, 'copyfrom', 'copyfrom', force='true')
print(info)
assert info.status_code == 200
ops = build_batch_rename(bucket_name, {'copyfrom': 'copyfrom'}, force='true')
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
def test_batch_stat(self):
ops = build_batch_stat(bucket_name, ['python-sdk.html'])
ret, info = self.bucket.batch(ops)
print(info)
assert ret[0]['code'] == 200
def test_delete_after_days(self):
days = '5'
ret, info = self.bucket.delete_after_days(bucket_name, 'invaild.html', days)
assert info.status_code == 612
key = 'copyto' + rand_string(8)
ret, info = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
ret, info = self.bucket.delete_after_days(bucket_name, key, days)
assert info.status_code == 200
class UploaderTestCase(unittest.TestCase):
mime_type = "text/plain"
params = {'x:a': 'a'}
q = Auth(access_key, secret_key)
def test_put(self):
key = 'a\\b\\c"hello'
data = 'hello bubby!'
token = self.q.upload_token(bucket_name)
ret, info = put_data(token, key, data)
print(info)
assert ret['key'] == key
def test_put_crc(self):
key = ''
data = 'hello bubby!'
token = self.q.upload_token(bucket_name, key)
ret, info = put_data(token, key, data, check_crc=True)
print(info)
assert ret['key'] == key
def test_putfile(self):
localfile = __file__
key = 'test_file'
token = self.q.upload_token(bucket_name, key)
ret, info = put_file(token, key, localfile, mime_type=self.mime_type, check_crc=True)
print(info)
assert ret['key'] == key
assert ret['hash'] == etag(localfile)
def test_putInvalidCrc(self):
key = 'test_invalid'
data = 'hello bubby!'
crc32 = 'wrong crc32'
token = self.q.upload_token(bucket_name)
ret, info = _form_put(token, key, data, None, None, crc=crc32)
print(info)
assert ret is None
assert info.status_code == 400
def test_putWithoutKey(self):
key = None
data = 'hello bubby!'
token = self.q.upload_token(bucket_name)
ret, info = put_data(token, key, data)
print(info)
assert ret['hash'] == ret['key']
data = 'hello bubby!'
token = self.q.upload_token(bucket_name, 'nokey2')
ret, info = put_data(token, None, data)
print(info)
assert ret is None
assert info.status_code == 403 # key not match
def test_withoutRead_withoutSeek_retry(self):
key = 'retry'
data = 'hello retry!'
set_default(default_zone=Zone('http://a', 'http://upload.qiniu.com'))
token = self.q.upload_token(bucket_name)
ret, info = put_data(token, key, data)
print(info)
assert ret['key'] == key
assert ret['hash'] == 'FlYu0iBR1WpvYi4whKXiBuQpyLLk'
def test_putData_without_fname(self):
if is_travis():
return
localfile = create_temp_file(30 * 1024 * 1024)
key = 'test_putData_without_fname'
with open(localfile, 'rb') as input_stream:
token = self.q.upload_token(bucket_name)
ret, info = put_data(token, key, input_stream)
print(info)
assert ret is not None
def test_putData_without_fname1(self):
if is_travis():
return
localfile = create_temp_file(30 * 1024 * 1024)
key = 'test_putData_without_fname1'
with open(localfile, 'rb') as input_stream:
token = self.q.upload_token(bucket_name)
ret, info = put_data(token, key, input_stream, self.params, self.mime_type, False, None, "")
print(info)
assert ret is not None
def test_putData_without_fname2(self):
if is_travis():
return
localfile = create_temp_file(30 * 1024 * 1024)
key = 'test_putData_without_fname2'
with open(localfile, 'rb') as input_stream:
token = self.q.upload_token(bucket_name)
ret, info = put_data(token, key, input_stream, self.params, self.mime_type, False, None, " ")
print(info)
assert ret is not None
class ResumableUploaderTestCase(unittest.TestCase):
mime_type = "text/plain"
params = {'x:a': 'a'}
q = Auth(access_key, secret_key)
def test_put_stream(self):
localfile = __file__
key = 'test_file_r'
size = os.stat(localfile).st_size
with open(localfile, 'rb') as input_stream:
token = self.q.upload_token(bucket_name, key)
ret, info = put_stream(token, key, input_stream, os.path.basename(__file__), size, self.params,
self.mime_type)
print(info)
assert ret['key'] == key
def test_big_file(self):
key = 'big'
token = self.q.upload_token(bucket_name, key)
localfile = create_temp_file(4 * 1024 * 1024 + 1)
progress_handler = lambda progress, total: progress
qiniu.set_default(default_zone=Zone('http://a', 'http://upload.qiniu.com'))
ret, info = put_file(token, key, localfile, self.params, self.mime_type, progress_handler=progress_handler)
print(info)
assert ret['key'] == key
remove_temp_file(localfile)
def test_retry(self):
localfile = __file__
key = 'test_file_r_retry'
qiniu.set_default(default_zone=Zone('http://a', 'http://upload.qiniu.com'))
token = self.q.upload_token(bucket_name, key)
ret, info = put_file(token, key, localfile, self.params, self.mime_type)
print(info)
assert ret['key'] == key
assert ret['hash'] == etag(localfile)
class DownloadTestCase(unittest.TestCase):
q = Auth(access_key, secret_key)
def test_private_url(self):
private_bucket = 'private-res'
private_key = 'gogopher.jpg'
base_url = 'http://%s/%s' % (private_bucket + '.qiniudn.com', private_key)
private_url = self.q.private_download_url(base_url, expires=3600)
print(private_url)
r = requests.get(private_url)
assert r.status_code == 200
class MediaTestCase(unittest.TestCase):
def test_pfop(self):
q = Auth(access_key, secret_key)
pfop = PersistentFop(q, 'testres', 'sdktest')
op = op_save('avthumb/m3u8/segtime/10/vcodec/libx264/s/320x240', 'pythonsdk', 'pfoptest')
ops = []
ops.append(op)
ret, info = pfop.execute('sintel_trailer.mp4', ops, 1)
print(info)
assert ret['persistentId'] is not None
class EtagTestCase(unittest.TestCase):
def test_zero_size(self):
open("x", 'a').close()
hash = etag("x")
assert hash == 'Fto5o-5ea0sNMlW_75VgGJCv2AcJ'
remove_temp_file("x")
def test_small_size(self):
localfile = create_temp_file(1024 * 1024)
hash = etag(localfile)
assert hash == 'FnlAdmDasGTQOIgrU1QIZaGDv_1D'
remove_temp_file(localfile)
def test_large_size(self):
localfile = create_temp_file(4 * 1024 * 1024 + 1)
hash = etag(localfile)
assert hash == 'ljF323utglY3GI6AvLgawSJ4_dgk'
remove_temp_file(localfile)
class ReadWithoutSeek(object):
def __init__(self, str):
self.str = str
pass
def read(self):
print(self.str)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_7571 | import json
import sys
# Loads JSON config file
if len(sys.argv) != 2:
print("Provide single filename")
sys.exit()
else:
configFileName = sys.argv[1]
with open(configFileName, 'r') as json_file:
try:
global file
file = json.load(json_file)
except:
print("Unable to load JSON file")
sys.exit()
print(len(file['nodes'])) |
the-stack_0_7572 | from datetime import datetime
import json
import pytz
from majestic.content import BlogObject
from majestic.utils import chunk, absolute_urls
class PostsCollection(BlogObject):
"""Base class for a collection of posts
This should be subclassed for objects that work on several posts,
such as for indexes and archives.
Apart from the settings object, it takes only one argument on
initialisation: a collection of post that is stored newest-first
(the collection is sorted in reverse order).
"""
def __init__(self, posts, settings):
self._settings = settings
self.posts = sorted(posts, reverse=True)
def __iter__(self):
"""Iterate over self.posts"""
return (post for post in self.posts)
class Index(PostsCollection):
"""Index represents a blog index page
It has the following attributes:
page_number: 1 to len(index_pages)
newer_index_url: url to an index with more recent posts or None
older_index_url: url to an index with less recent posts or None
output_path: path the index should be written to (pathlib.Path)
url: url of the index (str)
posts: [Post] to be rendered on the index page
An Index created with page_number 1 is always saved to a file named
index.html and its url is the site's url.
The class method .paginate_posts creates a list of Index objects out
of a list of posts.
"""
_path_template_key = 'index pages path template'
_template_file_key = 'index'
def __init__(self, page_number, posts, settings,
newer_index_url=None, older_index_url=None):
"""Initialise the Index and computer output_path and url"""
super().__init__(posts=posts, settings=settings)
self.page_number = page_number
self.newer_index_url = newer_index_url
self.older_index_url = older_index_url
if page_number == 1:
self.path_part = 'index.html' # Override for output path
self.url = settings['site']['url'] # Set as plain url
def __iter__(self):
"""Iterate over self.posts"""
return (post for post in self.posts)
def __eq__(self, other):
"""Compare self with other based on content attributes"""
attrs = ['page_number', 'posts', 'output_path', 'url',
'newer_index_url', 'older_index_url']
return all(getattr(self, a) == getattr(other, a) for a in attrs)
def __lt__(self, other):
"""Index compares by page_number"""
return self.page_number < other.page_number
def __str__(self):
"""Return str(self)"""
template = 'Index page {page_number}, {num_posts} posts ({url})'
return template.format(page_number=self.page_number,
num_posts=len(self.posts),
url=self.url)
@classmethod
def paginate_posts(cls, posts, settings):
"""Split up posts across a list of index pages
The returned list is ordered by index page number.
"""
posts_per_page = settings['index']['posts per page']
posts_newest_first = sorted(posts, reverse=True)
chunked = chunk(posts_newest_first, chunk_length=posts_per_page)
index_list = [cls(page_number=n, settings=settings, posts=post_list)
for n, post_list in enumerate(chunked, start=1)]
for n, index_object in enumerate(index_list):
if n != 0: # First index has the newest posts
index_object.newer_index_url = index_list[n - 1].url
if n + 1 < len(index_list): # Last index has the oldest posts
index_object.older_index_url = index_list[n + 1].url
return index_list
class Feed(PostsCollection):
"""A generic feed for a blog"""
def __init__(self, posts, settings):
"""Initialise Feed with a list of posts and the site settings
posts can be any list of posts, and only the most recent n are
stored as a posts attribute on the object. The number chosen
is set in the settings file under [feeds][number of posts].
The superclass's __init__ isn't called because the posts list
has to be sorted before being limited, so there's no point
calling super().__init__ and doing unnecessary work.
"""
self._settings = settings
post_limit = settings['feeds']['number of posts']
self.posts = sorted(posts, reverse=True)[:post_limit]
class RSSFeed(Feed):
"""An RSS feed for a blog"""
_path_template_key = 'rss path template'
_template_file_key = 'rss'
class JSONFeed(Feed):
"""A JSON feed for a blog
Valid for JSON Feed version 1 (https://jsonfeed.org/version/1)
"""
_path_template_key = 'json feed path template'
# _template_file_key deliberately unset as JSONFeed
# will not be rendered using a Jinja template
def render_to_disk(self, *args, **kwargs):
"""Write a valid JSON feed dictionary to disk
This overrides the standard BlogObject method because it
doesn't make use of Jinja templating to construct the
representation written on disk.
        Instead it constructs a dictionary and serialises that
with the standard json module.
"""
feed_dict = dict(
version='https://jsonfeed.org/version/1',
title=self._settings['site']['title'],
home_page_url=self._settings['site']['url'],
feed_url=self.url,
description=self._settings['site']['description'],
**self._settings['feeds']['json'])
feed_dict['items'] = [
{'id': p.url,
'url': p.url,
'title': p.title,
'content_html':
absolute_urls(p.html, self._settings['site']['url']),
'date_published': p.date.isoformat(timespec='seconds')}
for p in self.posts
]
self.output_path.parent.mkdir(parents=True, exist_ok=True)
with self.output_path.open(mode='w', encoding='utf-8') as file:
json.dump(feed_dict, file, indent=2)
class Archives(PostsCollection):
"""An archives page for a blog
Should be initialised with all of the blog's posts.
"""
_path_template_key = 'archives path template'
_template_file_key = 'archives'
class Sitemap(BlogObject):
"""Represents an XML sitemap
Contains a list of tuples [(str, datetime)] that correspond to the
url (loc) and modification date (lastmod) of each sitemap entry.
The modification date is the file's modification time in UTC, as an
aware datetime. This skips around issues of retrieving the system
timezone (not a trivial task and of no advantage) yet allows the
inclusion of a timezone in the sitemap itself.
"""
_path_template_key = 'sitemap path template'
_template_file_key = 'sitemap'
def __init__(self, content, settings):
"""Initialise Sitemap with site settings and a list of BlogObjects
content: [BlogObject] containing each file to be represented
"""
self._settings = settings
self.url_date_pairs = []
for file in content:
url = file.url
mtime = file.output_path.stat().st_mtime
mod_date = datetime.fromtimestamp(mtime, tz=pytz.utc)
self.url_date_pairs.append((url, mod_date))
def __iter__(self):
"""Iterate over the tuples in self.url_date_pairs"""
return (item for item in self.url_date_pairs)
|
the-stack_0_7576 | import logging
import os
import random
import warnings
from functools import wraps
from typing import Optional
import numpy as np
import torch
log = logging.getLogger(__name__)
def seed_everything(seed: Optional[int] = None, workers: bool = False, deterministic: bool = False) -> int:
"""
``
This part of the code comes from PyTorch Lightning.
https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/utilities/seed.py
``
Function that sets seed for pseudo-random number generators in:
pytorch, numpy, python.random
In addition, sets the following environment variables:
- `PL_GLOBAL_SEED`: will be passed to spawned subprocesses (e.g. ddp_spawn backend).
- `PL_SEED_WORKERS`: (optional) is set to 1 if ``workers=True``.
Args:
seed: the integer value seed for global random state in Lightning.
If `None`, will read seed from `PL_GLOBAL_SEED` env variable
or select it randomly.
workers: if set to ``True``, will properly configure all dataloaders passed to the
Trainer with a ``worker_init_fn``. If the user already provides such a function
for their dataloaders, setting this argument will have no influence. See also:
:func:`~pytorch_lightning.utilities.seed.pl_worker_init_function`.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
max_seed_value = np.iinfo(np.uint32).max
min_seed_value = np.iinfo(np.uint32).min
try:
if seed is None:
seed = os.environ.get("PL_GLOBAL_SEED")
seed = int(seed)
except (TypeError, ValueError):
seed = _select_seed_randomly(min_seed_value, max_seed_value)
rank_zero_warn(f"No correct seed found, seed set to {seed}")
if not (min_seed_value <= seed <= max_seed_value):
rank_zero_warn(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
seed = _select_seed_randomly(min_seed_value, max_seed_value)
# using `log.info` instead of `rank_zero_info`,
# so users can verify the seed is properly set in distributed training.
log.info(f"Global seed set to {seed}")
os.environ["PL_GLOBAL_SEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ["PL_SEED_WORKERS"] = f"{int(workers)}"
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
return seed
def _select_seed_randomly(min_seed_value: int = 0, max_seed_value: int = 255) -> int:
return random.randint(min_seed_value, max_seed_value)
def rank_zero_only(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
if rank_zero_only.rank == 0:
return fn(*args, **kwargs)
return wrapped_fn
@rank_zero_only
def rank_zero_warn(*args, stacklevel: int = 4, **kwargs):
warnings.warn(*args, stacklevel=stacklevel, **kwargs)
def reset_seed() -> None:
"""
Reset the seed to the value that :func:`seed_everything` previously set.
If :func:`seed_everything` is unused, this function will do nothing.
"""
seed = os.environ.get("PL_GLOBAL_SEED", None)
workers = os.environ.get("PL_SEED_WORKERS", False)
if seed is not None:
seed_everything(int(seed), workers=bool(workers))
def format_logs(logs):
str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
s = ', '.join(str_logs)
return s
def check_tensor(data, is_label):
if not is_label:
return data if data.ndim <= 4 else data.squeeze()
return data.long() if data.ndim <= 3 else data.squeeze().long()
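# Minimal usage sketch (illustrative only; the seed value and logged key are arbitrary):
if __name__ == "__main__":
    # Seed python, numpy and torch, opt into worker seeding and deterministic cuDNN.
    chosen_seed = seed_everything(seed=42, workers=True, deterministic=True)
    print(format_logs({"global_seed": float(chosen_seed)}))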
|
the-stack_0_7577 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
set_cache_mocktime,
set_genesis_mocktime,
get_mocktime
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
set_genesis_mocktime()
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
set_cache_mocktime()
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitmonixds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitmonixds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing bitmonixd/bitmonix-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: bitmonixds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From" , f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BXCD", "bitmonixd"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BXCD", "bitmonixd"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
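# Typical usage sketch (illustrative, not part of this module): a concrete test
# subclasses BitcoinTestFramework, overrides run_test(), and calls main():
#
#     class ExampleTest(BitcoinTestFramework):
#         def run_test(self):
#             assert self.nodes[0].getblockcount() >= 0
#
#     if __name__ == '__main__':
#         ExampleTest().main()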
|
the-stack_0_7578 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test accessibility of resource endpoints."""
from __future__ import unicode_literals
import json
from flask import url_for
from invenio_accounts.models import User
from invenio_accounts.testutils import login_user_via_session
NEW_INTERNAL_LOCATION = {
"location_pid": "locid-1",
"legacy_id": "Test legacy id",
"name": "My test internal location",
"physical_location": "Left from the right building",
"notes": "In house",
}
def user_login(user_id, client, users):
    """Util function to log a user in."""
login_user_via_session(
client, email=User.query.get(users[user_id].id).email
)
def _test_response(client, req_method, url, headers, data, expected_resp_code):
"""Util function testing response code."""
if data:
res = getattr(client, req_method)(
url, headers=headers, data=json.dumps(data)
)
else:
res = getattr(client, req_method)(url, headers=headers)
assert expected_resp_code == res.status_code
return res
def test_post_internal_location(client, json_headers, testdata, users):
"""Test POST of internal_location."""
user_login("admin", client, users)
url = url_for("invenio_records_rest.ilocid_list")
res = _test_response(
client, "post", url, json_headers, NEW_INTERNAL_LOCATION, 201
)
data = json.loads(res.data.decode("utf-8"))["metadata"]
assert "name" in data["location"]
def test_post_partial_internal_location(client, json_headers, testdata, users):
"""Test POST of internal_location without all required data."""
user_login("admin", client, users)
del NEW_INTERNAL_LOCATION["location_pid"]
url = url_for("invenio_records_rest.ilocid_list")
_test_response(
client, "post", url, json_headers, NEW_INTERNAL_LOCATION, 400
)
def test_post_item(client, json_headers, testdata, users, item_record):
"""Test POST of an item."""
user_login("admin", client, users)
url = url_for("invenio_records_rest.itemid_list")
res = _test_response(client, "post", url, json_headers, item_record, 201)
data = json.loads(res.data.decode("utf-8"))["metadata"]
assert "name" in data["internal_location"]
|
the-stack_0_7580 | from data_loader import *
import autosklearn.regression
import sklearn
import pickle
import matplotlib.pyplot as plt
import numpy as np
from train_config import train_indexes
def my_train_test_split(X,y,num_point=1,train_size=0.75,select_method='random',logfile=None):
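    # Note: the num_point argument is effectively unused; it is recomputed from len(X) below.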
num_point = len(X)
if select_method=='random':
train_index = random.sample(range(num_point),int(train_size*num_point))
test_index = [x for x in range(num_point) if x not in train_index]
elif select_method=='uniform':
train_index = [int(i*(1.0/train_size)) for i in range(int(train_size*num_point))]
test_index = [x for x in range(num_point) if x not in train_index]
print('train points: %s \ntest points: %s'%(train_index,test_index))
if logfile != None:
print('train points: %s \ntest points: %s'%(train_index,test_index),file=logfile)
flag = 0
for i in train_index:
if flag==0:
X_train = X[i]
y_train = y[i]
flag = 1
else:
X_train = np.vstack((X_train,X[i]))
y_train = np.vstack((y_train,y[i]))
flag = 0
for i in test_index:
if flag==0:
X_test = X[i]
y_test = y[i]
flag = 1
else:
X_test = np.vstack((X_test,X[i]))
y_test = np.vstack((y_test,y[i]))
return X_train, X_test, y_train, y_test
def index_train_test_split(X,y,train_index,test_index,logfile=None):
print('train points: %s \ntest points: %s'%(train_index,test_index))
if logfile != None:
print('train points: %s \ntest points: %s'%(train_index,test_index),file=logfile)
flag = 0
for i in train_index:
if flag==0:
X_train = X[i]
y_train = y[i]
flag = 1
else:
X_train = np.vstack((X_train,X[i]))
y_train = np.vstack((y_train,y[i]))
flag = 0
for i in test_index:
if flag==0:
X_test = X[i]
y_test = y[i]
flag = 1
else:
X_test = np.vstack((X_test,X[i]))
y_test = np.vstack((y_test,y[i]))
return X_train, X_test, y_train, y_test
def cal_range(r2,mse,mae,num_train_sizes,r2_limit):
r2 = np.array(r2)
mse = np.array(mse)
mae = np.array(mae)
r2 = np.reshape(r2,(num_train_sizes,10))
mse = np.reshape(mse,(num_train_sizes,10))
mae = np.reshape(mae,(num_train_sizes,10))
r2_mean = []
mse_mean = []
mae_mean = []
r2_std = []
mse_std = []
mae_std = []
for i in range(num_train_sizes):
fix_index = (r2[i]>r2_limit)
temp_r2 = r2[i][fix_index]
#print(temp_r2)
temp_r2_mean = np.mean(temp_r2)
temp_r2_std = np.std(temp_r2)
temp_mse = mse[i][fix_index]**0.5
temp_mse_mean = np.mean(temp_mse)
temp_mse_std = np.std(temp_mse)
temp_mae = mae[i][fix_index]
temp_mae_mean = np.mean(temp_mae)
temp_mae_std = np.std(temp_mae)
r2_mean.append(temp_r2_mean)
r2_std.append(temp_r2_std)
mse_mean.append(temp_mse_mean)
mse_std.append(temp_mse_std)
mae_mean.append(temp_mae_mean)
mae_std.append(temp_mae_std)
#print(temp_r2_mean,temp_mse_mean,temp_mae_mean)
#exit()
r2_mean = np.array(r2_mean)
mse_mean = np.array(mse_mean)
mae_mean = np.array(mae_mean)
r2_std = np.array(r2_std)
mse_std = np.array(mse_std)
mae_std = np.array(mae_std)
return r2_mean,mse_mean,mae_mean,r2_std,mse_std,mae_std
def main():
#load data
model_index = 0
X,y = load_data('./', probe_type='line', Xtype='loc',ytype='tn',logfile=None)
r2_mean = []
r2_std=[]
rmse_mean=[]
rmse_std = []
#loop
for i in range(len(train_indexes)):
current_indexes = train_indexes[i]
#analysis
r2_scores = []
mses = []
maes = []
for j in range(len(current_indexes)):
if model_index < 0:
#if current_indexes[j] != [88]:
model_index=model_index+1
print(model_index)
else:
logfile = open('model_log_singlemodel2.txt','a+')
train_index = current_indexes[j]
#multiply 3:
#for ii in range(len(train_index)):
# train_index[ii] = train_index[ii]*3
test_index = [x for x in list(range(len(X))) if x not in train_index]
X_train, X_test, y_train, y_test= index_train_test_split(X, y, train_index,test_index)
print('[*]dataset loaded! Train: %s, Test: %s'%(X_train.shape,X_test.shape))
train_size = X_train.shape[0]/(X_train.shape[0]+X_test.shape[0])
print('train size', train_size)
if train_size>0.25:
X_train = X_train[::4]
X_test = X_test[::4]
y_train = y_train[::4]
y_test = y_test[::4]
automl = autosklearn.regression.AutoSklearnRegressor(
time_left_for_this_task=30,#60*time_limit,
per_run_time_limit=2,#30*time_limit,
include_estimators = ['extra_trees'],
#include_preprocessors = ['no_preprocessing']
#tmp_folder='tmp_folder1',
#output_folder='tmp_output_folder1',
#ensemble_size=1,
)
automl.fit(X_train, y_train)
print(automl.show_models())
#uniform downsample
#print('[*]dataset downsample! Train: %s, Test: %s'%(X_train.shape,X_test.shape))
predictions = automl.predict(X_test)
r2_score = sklearn.metrics.r2_score(y_test, predictions)
mse = sklearn.metrics.mean_squared_error(y_test, predictions)
mae = sklearn.metrics.mean_absolute_error(y_test, predictions)
r2_scores.append(r2_score)
mses.append(mse)
maes.append(mae)
print("[*]R2 score:", r2_score,file=logfile)
print("[*]mse:", mse,file=logfile)
print("[*]mae:", mae,file=logfile)
print("[*]analysis:")
print(r2_scores,'\n',mses,'\n',maes)
s = pickle.dumps(automl)
with open('./line_models/model_pt'+str(len(train_index))+'_id'+str(model_index)+'_t.pkl', 'wb') as f:
f.write(s)
print('[*]save model!')
model_index = model_index + 1
#print('\n\n\n\n',file=logfile)
logfile.close()
r2_scores = np.array(r2_scores)
mses = np.array(mses)
maes = np.array(maes)
temp_r2_mean = np.mean(r2_scores)
temp_r2_std = np.std(r2_scores)
rmses = mses**0.5
temp_rmses_mean = np.mean(rmses)
temp_rmses_std = np.std(rmses)
r2_mean.append(temp_r2_mean)
r2_std.append(temp_r2_std)
rmse_mean.append(temp_rmses_mean)
rmse_std.append(temp_rmses_std)
print('\n\n\n','[*]calculate:',r2_mean,r2_std,rmse_mean,rmse_std,'\n\n\n')
#plot
'''
r2_mean = np.array(r2_mean)
r2_std = np.array(r2_std)
param_range = [1,2,3,4,5,6,7,8,9,10]
param_range = list(reversed(param_range))
plt.plot(param_range,r2_mean,color='b',marker='o',markersize=5,label='r2 score (loc)')
plt.fill_between(param_range,r2_mean+r2_std,r2_mean-r2_std,alpha=0.15,color='b')
#plt.plot(param_range,mse_mean,color='b',linestyle='--',marker='s',markersize=5,label='RMSE (loc)')
#plt.fill_between(param_range,mse_mean+mse_std,mse_mean-mse_std,alpha=0.15,color='b')
plt.xlabel('Training point')
plt.ylabel('Model Metric')
plt.legend(loc='lower right')
#plt.ylim([0,1])
plt.grid()
plt.title('Train Size Analyse')
plt.show()
'''
print('all done')
#load data
if __name__ == "__main__":
main()
|
the-stack_0_7583 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class ExpressRouteServiceProvider(Resource):
"""A ExpressRouteResourceProvider object.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param peering_locations: Get a list of peering locations.
:type peering_locations: list[str]
:param bandwidths_offered: Gets bandwidths offered.
:type bandwidths_offered:
list[~azure.mgmt.network.v2018_10_01.models.ExpressRouteServiceProviderBandwidthsOffered]
:param provisioning_state: Gets the provisioning state of the resource.
:type provisioning_state: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'peering_locations': {'key': 'properties.peeringLocations', 'type': '[str]'},
'bandwidths_offered': {'key': 'properties.bandwidthsOffered', 'type': '[ExpressRouteServiceProviderBandwidthsOffered]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, peering_locations=None, bandwidths_offered=None, provisioning_state: str=None, **kwargs) -> None:
super(ExpressRouteServiceProvider, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.peering_locations = peering_locations
self.bandwidths_offered = bandwidths_offered
self.provisioning_state = provisioning_state
|
the-stack_0_7586 | import random
from fastapi import APIRouter, Path, Query
from ..dependencies import get_json
router = APIRouter()
quotes = get_json("quotes")
@router.get("/quotes/all")
async def all_quotes():
return {
"quotes": [
{"id": int(q_id), "quote": quotes[q_id][0], "author": quotes[q_id][1]}
for q_id in quotes
]
}
@router.get("/quotes/{quote_id}")
async def quotes_by_id(quote_id: int = Path(..., ge=0, lt=len(quotes))):
quote_id = str(quote_id)
return {
"id": int(quote_id),
"quote": quotes[quote_id][0],
"author": quotes[quote_id][1],
}
@router.get("/quotes")
async def get_quotes(num: int = Query(1, ge=1, lt=len(quotes))):
random_ids = sorted(random.sample(range(len(quotes)), num))
return {
"quotes": [
{
"id": quote_id,
"quote": quotes[str(quote_id)][0],
"author": quotes[str(quote_id)][1],
}
for quote_id in random_ids
]
}
|
the-stack_0_7587 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base.exception_handlers.ReusableSequence import ReusableSequence
from riscv.exception_handlers.ExceptionHandlerContext import RegisterCallRole
## This class generates reusable subroutines that exception handlers can call.
class HandlerSubroutineGeneratorRISCV(ReusableSequence):
#constant values - indexed by mode (sv32/39/48)
#max levels in walk
LEVELS = {
32 : 2,
39 : 4,
48 : 4,
}
#pte size, in bytes
PTE_SIZE = {
32 : 4,
39 : 8,
48 : 8,
}
    #shift based on pte size, in bits
PTESIZE_SHIFT = {
32 : 2,
39 : 3,
48 : 3,
}
#constant values - same between SV32/39/48 walks
PPN_MASK = 0x3ffffffffffc00 #masks lower 10 bits of descriptor to isolate ppn
PPN_SHIFT = 10
PTE_SHIFT = 12
PTE_XWRV_MASK = 0xf
PTE_PTR_VAL = 0x1
LEVEL_MASK = 0x1ff
LEVEL_BITS = {
3 : (47, 39),
2 : (38, 30),
1 : (29, 21),
0 : (20, 12),
}
#LEVEL_MASK_32 = 0x3ff
#LEVEL_BITS_32 = { 1 : (31, 22), 0 : (21, 12) }
def __init__(self, aGenThread, aFactory, aStack):
super().__init__(aGenThread, aFactory, aStack)
self._mExceptionsStack = aStack
self._mPrivRegIndex = None
self._mCauseRegIndex = None
self._mPteAddrRegIndex = None
self._mPteRegIndex = None
self._mR1 = None
self._mR2 = None
self._mR3 = None
self._mCalleeSavedRegIndices = None
self._mWalkLevelRegIndex = None
self._mAtpRegIndex = None
self._mFaultAddrRegIndex = None
## Generate code to walk the page table and retrieve the address and value of a faulting
# descriptor. The descriptor address is written to Argument Register 0; the descriptor value is
# written to Argument Register 1.
#
# @param kwargs Keyword arguments containing a handler context object.
def generateTableWalk(self, **kwargs):
try:
handler_context = kwargs['handler_context']
except KeyError:
self.error('INTERNAL ERROR: one or more arguments to generateTableWalk() method missing.')
self._assignScratchRegisterIndices(handler_context)
self.debug('[HandlerSubroutineGenerator::generateTableWalk] entry point: 0x%x' % self.getPEstate('PC'))
self.mAssemblyHelper.clearLabels('TableWalk')
self.mAssemblyHelper.logDebugSymbol('generateTableWalk')
# Before modifying registers, save their old values
self._pushExceptionSpecificRegisters()
self._genLoadAllContextRegisters(self._mR1)
self.mAssemblyHelper.logDebugSymbol('Current Privilege Level: X%d' % self._mPrivRegIndex)
self.mAssemblyHelper.logDebugSymbol('Fault Address: X%d' % self._mFaultAddrRegIndex)
#self.mAssemblyHelper.addLabel('ATP MODE check') TODO add switching for diff modes
self.callRoutine('TableWalkSV48')
#TODO see if pteregindex has correct value
#self.mAssemblyHelper.addLabel('End genTableWalk')
#self.mAssemblyHelper.genMoveRegister(self._mPteRegIndex, self._mR2)
self._popExceptionSpecificRegisters()
self.mAssemblyHelper.genReturn()
def generateTableWalkSV48(self, **kwargs):
try:
handler_context = kwargs['handler_context']
except KeyError:
self.error('INTERNAL ERROR: one or more arguments to generateTableWalk() method missing.')
self._assignScratchRegisterIndices(handler_context)
self._genTableWalk()
def getPrerequisiteRoutineNames(self, aRoutineName):
if aRoutineName == 'TableWalk':
return ('TableWalkSV48',)
else:
return tuple()
## Initialize scratch register index member variables.
#
# @param aHandlerContext The exception handler context from which register indices can be
# retrieved by role.
def _assignScratchRegisterIndices(self, aHandlerContext):
self._mPrivRegIndex = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.PRIV_LEVEL_VALUE)
self._mCauseRegIndex = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.CAUSE_VALUE)
(self._mPteAddrRegIndex, self._mPteRegIndex) = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.ARGUMENT, 2)
(self._mR1, self._mR2, self._mR3) = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.TEMPORARY, 3)
self._mCalleeSavedRegIndices = aHandlerContext.getScratchRegisterIndices(RegisterCallRole.CALLEE_SAVED, 3)
(self._mWalkLevelRegIndex, self._mAtpRegIndex, self._mFaultAddrRegIndex) = self._mCalleeSavedRegIndices
## Record the values of all callee-saved registers that are used.
def _pushExceptionSpecificRegisters(self):
for reg in self._mCalleeSavedRegIndices:
self._mExceptionsStack.push(reg)
## Restore the values of all callee-saved registers that are used.
def _popExceptionSpecificRegisters(self):
for reg in reversed(self._mCalleeSavedRegIndices):
self._mExceptionsStack.pop(reg)
## Extract the address size, page granule size, faulting address, page table base address and
# fault level from the relevant registers.
#
# @param aScratchRegIndex The index of a register than can be freely modified.
def _genLoadAllContextRegisters(self, aScratchRegIndex):
#load ATP register for mode/ppn
self.mAssemblyHelper.logDebugSymbol('_genLoadAllContextRegisters')
self.mAssemblyHelper.genReadSystemRegister(self._mAtpRegIndex, 'satp')
#read faulting address based on privilege level
self.mAssemblyHelper.genMoveImmediate(self._mR1, 3)
self.mAssemblyHelper.genConditionalBranchToLabel(self._mR1, self._mPrivRegIndex, 6, 'NE', 'S PRIV')
self.mAssemblyHelper.genReadSystemRegister(self._mFaultAddrRegIndex, 'mtval')
self.mAssemblyHelper.genRelativeBranchToLabel(4, 'PPN MASK')
self.mAssemblyHelper.addLabel('S PRIV')
self.mAssemblyHelper.genReadSystemRegister(self._mFaultAddrRegIndex, 'stval')
#mask and shift root PPN into address from atp register
self.mAssemblyHelper.addLabel('PPN MASK')
self.mAssemblyHelper.genAndImmediate(self._mAtpRegIndex, 0xfffffffffff)
self.mAssemblyHelper.genShiftLeftImmediate(self._mAtpRegIndex, self.PTE_SHIFT)
#set up register to count levels walked
self.mAssemblyHelper.genMoveImmediate(self._mWalkLevelRegIndex, self.LEVELS[48]-1)
def _genTableWalk(self):
self.mAssemblyHelper.logDebugSymbol('Gen Table Walk Start')
self.debug('[_genTableWalk] PC before generating walk levels 0x%x' % self.getPEstate('PC'))
start_level = self.LEVELS[48] - 1
self.debug('[_genTableWalk] start level 0x%x' % start_level)
for cur_level in range(start_level, -1, -1):
self.debug('[_genTableWalk] Generating level %d' % cur_level)
self._genLevelWalk(cur_level)
#_mAtpRegIndex should hold the address of the next table either from ATP/PTE lookup
#_R3 used to compute new pte addr
#_R2 used to hold new pte val, check for PTE pointer
#_R1 used to hold immediates for comparison, and to load new value into mAtpRegIndex
def _genLevelWalk(self, aCurLevel):
#self._mWalkLevelLabels['LEVEL %d' % aCurLevel] = self.getPEstate('PC')
(top_bit, bottom_bit) = self.LEVEL_BITS[aCurLevel]
#mask vpn from faulting address put in R3
self.mAssemblyHelper.genShiftRightImmediate(self._mR3, bottom_bit, self._mFaultAddrRegIndex)
self.mAssemblyHelper.genAndImmediate(self._mR3, self.LEVEL_MASK)
self.mAssemblyHelper.genShiftLeftImmediate(self._mR3, self.PTESIZE_SHIFT[48])
#mask add page offset to base
self.mAssemblyHelper.genAddRegister(self._mR3, self._mAtpRegIndex)
self.mAssemblyHelper.genMoveRegister(self._mPteAddrRegIndex, self._mR3)
#load pte from memory
self.mAssemblyHelper.genLoadMemory(self._mR2, self._mR3, 0)
self.mAssemblyHelper.genMoveRegister(self._mPteRegIndex, self._mR2)
#check pte pointer
self.mAssemblyHelper.genMoveImmediate(self._mR1, self.PTE_PTR_VAL)
self.mAssemblyHelper.genAndImmediate(self._mR2, self.PTE_XWRV_MASK)
self.mAssemblyHelper.genConditionalBranchToLabel(self._mR2, self._mR1, 4, 'EQ', 'NEXT LEVEL WALK %d' % aCurLevel)
#if PTE is a leaf node, we can return.
self.mAssemblyHelper.genReturn()
#otherwise, setup next level walk addr from descriptor ppn
self.mAssemblyHelper.addLabel('NEXT LEVEL WALK %d' % aCurLevel)
self.mAssemblyHelper.genAddImmediate(self._mWalkLevelRegIndex, -1)
self.mAssemblyHelper.genMoveRegister(self._mR1, self._mPteRegIndex)
self.mAssemblyHelper.genAndImmediate(self._mR1, self.PPN_MASK)
self.mAssemblyHelper.genShiftRightImmediate(self._mR1, self.PPN_SHIFT)
self.mAssemblyHelper.genShiftLeftImmediate(self._mAtpRegIndex, self._mR1, self.PTE_SHIFT)
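    # Hedged sketch (not from the original file): the walk above assumes Sv48-style
    # class constants roughly along these lines; the real values are defined elsewhere
    # in this generator.
    #   LEVELS = {48: 4}                  # four page-table levels for Sv48
    #   LEVEL_BITS = {3: (47, 39), 2: (38, 30), 1: (29, 21), 0: (20, 12)}
    #   LEVEL_MASK = 0x1ff                # 9 VPN bits per level
    #   PTESIZE_SHIFT = {48: 3}           # 8-byte page table entries
    #   PTE_SHIFT = 12                    # shift a PPN into a physical address
    #   PTE_XWRV_MASK = 0xf               # V, R, W, X permission bits
    #   PTE_PTR_VAL = 0x1                 # V=1, R=W=X=0 marks a pointer PTE
    #   PPN_MASK, PPN_SHIFT               # extract the PPN field from a PTE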
|
the-stack_0_7590 | from unittest import TestCase
from plenario.database import postgres_session, postgres_engine
import sqlalchemy as sa
from sqlalchemy import Table, Column, Integer, Date, Float, String, TIMESTAMP, MetaData, Text
from sqlalchemy.exc import NoSuchTableError
from geoalchemy2 import Geometry
from plenario.etl.point import Staging, PlenarioETL
import os
import json
from datetime import date
from plenario.models import MetaTable
from manage import init
pwd = os.path.dirname(os.path.realpath(__file__))
fixtures_path = os.path.join(pwd, '../fixtures')
def drop_if_exists(table_name):
try:
t = Table(table_name, MetaData(), extend_existing=True)
t.drop(postgres_engine, checkfirst=True)
except NoSuchTableError:
pass
def drop_meta(table_name):
del_ = "DELETE FROM meta_master WHERE dataset_name = '{}';".format(table_name)
postgres_engine.execute(del_)
class StagingTableTests(TestCase):
"""
Given a dataset is present in MetaTable,
can we grab a current csv of the underlying data from wherever it lives
and then make that into a free-standing table?
"""
@classmethod
def setUpClass(cls):
init()
cls.dog_path = os.path.join(fixtures_path, 'dog_park_permits.csv')
cls.radio_path = os.path.join(fixtures_path, 'community_radio_events.csv')
cls.opera_path = os.path.join(fixtures_path, 'public_opera_performances.csv')
cls.expected_radio_col_names = ['lat', 'lon', 'event_name', 'date']
cls.expected_dog_col_names = ['lat', 'lon', 'hooded_figure_id', 'date']
def setUp(self):
postgres_session.rollback()
# Ensure we have metadata loaded into the database
# to mimic the behavior of metadata ingestion preceding file ingestion.
drop_meta('dog_park_permits')
drop_meta('community_radio_events')
drop_meta('public_opera_performances')
# Make new MetaTable objects
self.unloaded_meta = MetaTable(url='nightvale.gov/events.csv',
human_name='Community Radio Events',
business_key='Event Name',
observed_date='Date',
latitude='lat', longitude='lon',
approved_status=True)
self.existing_meta = MetaTable(url='nightvale.gov/dogpark.csv',
human_name='Dog Park Permits',
business_key='Hooded Figure ID',
observed_date='Date',
latitude='lat', longitude='lon',
approved_status=False)
self.opera_meta = MetaTable(url='nightvale.gov/opera.csv',
human_name='Public Opera Performances',
business_key='Event Name',
observed_date='Date',
location='Location',
approved_status=False)
postgres_session.add_all([self.existing_meta, self.opera_meta, self.unloaded_meta])
postgres_session.commit()
# Also, let's have one table pre-loaded...
self.existing_table = sa.Table('dog_park_permits', MetaData(),
Column('hooded_figure_id', Integer),
Column('point_date', TIMESTAMP, nullable=False),
Column('date', Date, nullable=True),
Column('lat', Float, nullable=False),
Column('lon', Float, nullable=False),
Column('hash', String(32), primary_key=True),
Column('geom', Geometry('POINT', srid=4326), nullable=True))
drop_if_exists(self.existing_table.name)
self.existing_table.create(bind=postgres_engine)
# ... with some pre-existing data
ins = self.existing_table.insert().values(hooded_figure_id=1,
point_date=date(2015, 1, 2),
lon=-87.6495076896,
lat=41.7915865543,
geom=None,
hash='addde9be7f59e95fc08e54e29b2a947f')
postgres_engine.execute(ins)
def tearDown(self):
postgres_session.close()
'''
Do the names of created columns match what we expect?
Would be nice to check types too, but that was too fragile.
'''
@staticmethod
def extract_names(columns):
return [c.name for c in columns]
def test_col_info_infer(self):
        with Staging(self.unloaded_meta, source_path=self.radio_path) as s_table:
observed_names = self.extract_names(s_table.cols)
self.assertEqual(set(observed_names), set(self.expected_radio_col_names))
def test_col_info_existing(self):
with Staging(self.existing_meta, source_path=self.dog_path) as s_table:
observed_col_names = self.extract_names(s_table.cols)
self.assertEqual(set(observed_col_names), set(self.expected_dog_col_names))
def test_col_info_provided(self):
# The frontend should send back strings compatible with the COL_VALUES in etl.point
col_info_raw = [('event_name', 'string'),
('date', 'date'),
('lat', 'float'),
('lon', 'float')]
stored_col_info = [{'field_name': name, 'data_type': d_type}
for name, d_type in col_info_raw]
self.unloaded_meta.contributed_data_types = json.dumps(stored_col_info)
with Staging(self.unloaded_meta, source_path=self.radio_path) as s_table:
observed_names = self.extract_names(s_table.cols)
self.assertEqual(set(observed_names), set(self.expected_radio_col_names))
'''
Are the files ingested as we expect?
'''
def test_staging_new_table(self):
# For the entry in MetaTable without a table, create a staging table.
# We'll need to read from a fixture csv.
with Staging(self.unloaded_meta, source_path=self.radio_path) as s_table:
with postgres_engine.begin() as connection:
all_rows = connection.execute(s_table.table.select()).fetchall()
self.assertEqual(len(all_rows), 5)
def test_staging_existing_table(self):
# With a fixture CSV whose columns match the existing dataset,
# create a staging table.
with Staging(self.existing_meta, source_path=self.dog_path) as s_table:
with postgres_engine.begin() as connection:
all_rows = connection.execute(s_table.table.select()).fetchall()
self.assertEqual(len(all_rows), 5)
def test_insert_data(self):
etl = PlenarioETL(self.existing_meta, source_path=self.dog_path)
etl.update()
existing = self.existing_table
all_rows = postgres_session.execute(existing.select()).fetchall()
self.assertEqual(len(all_rows), 5)
def test_update_no_change(self):
etl = PlenarioETL(self.existing_meta, source_path=self.dog_path)
etl.update()
# We're just checking that an exception doesn't get thrown.
etl = PlenarioETL(self.existing_meta, source_path=self.dog_path)
etl.update()
def test_update_with_delete(self):
etl = PlenarioETL(self.existing_meta, source_path=self.dog_path)
etl.update()
# The same source CSV, but with one less record
deleted_path = os.path.join(fixtures_path, 'dog_park_permits_deleted.csv')
etl = PlenarioETL(self.existing_meta, source_path=deleted_path)
etl.update()
all_rows = postgres_session.execute(self.existing_table.select()).fetchall()
self.assertEqual(len(all_rows), 4)
def test_update_with_change(self):
drop_if_exists(self.unloaded_meta.dataset_name)
etl = PlenarioETL(self.unloaded_meta, source_path=self.radio_path)
table = etl.add()
changed_path = os.path.join(fixtures_path, 'community_radio_events_changed.csv')
etl = PlenarioETL(self.unloaded_meta, source_path=changed_path)
etl.update()
sel = sa.select([table.c.date]).where(table.c.event_name == 'baz')
changed_date = postgres_engine.execute(sel).fetchone()[0]
self.assertEqual(changed_date, date(1993, 11, 10))
def test_new_table(self):
drop_if_exists(self.unloaded_meta.dataset_name)
etl = PlenarioETL(self.unloaded_meta, source_path=self.radio_path)
new_table = etl.add()
all_rows = postgres_session.execute(new_table.select()).fetchall()
self.assertEqual(len(all_rows), 5)
postgres_session.close()
new_table.drop(postgres_engine, checkfirst=True)
# Did we add a bbox?
bbox = MetaTable.get_by_dataset_name('community_radio_events').bbox
self.assertIsNotNone(bbox)
def test_new_table_has_correct_column_names_in_meta(self):
drop_if_exists(self.unloaded_meta.dataset_name)
etl = PlenarioETL(self.unloaded_meta, source_path=self.radio_path)
new_table = etl.add()
columns = postgres_session.query(MetaTable.column_names)
columns = columns.filter(MetaTable.dataset_name == self.unloaded_meta.dataset_name)
columns = columns.first()[0]
self.assertEqual(len(columns), 4)
postgres_session.close()
new_table.drop(postgres_engine, checkfirst=True)
def test_location_col_add(self):
drop_if_exists(self.opera_meta.dataset_name)
etl = PlenarioETL(self.opera_meta, source_path=self.opera_path)
new_table = etl.add()
all_rows = postgres_session.execute(new_table.select()).fetchall()
self.assertEqual(len(all_rows), 5)
postgres_session.close()
new_table.drop(postgres_engine, checkfirst=True)
# Did we add a bbox?
bbox = MetaTable.get_by_dataset_name('public_opera_performances').bbox
self.assertIsNotNone(bbox)
def test_location_col_update(self):
drop_if_exists(self.opera_meta.dataset_name)
self.opera_table = sa.Table(self.opera_meta.dataset_name, MetaData(),
Column('event_name', String, primary_key=True),
Column('date', Date, nullable=True),
Column('location', String, nullable=False),
Column('geom', Geometry('POINT', srid=4326), nullable=True),
Column('point_date', TIMESTAMP, nullable=False))
drop_if_exists(self.existing_table.name)
self.opera_table.create(bind=postgres_engine)
ins = self.opera_table.insert().values(event_name='quux',
date=None,
point_date=date(2015, 1, 2),
location='(-87.6495076896,41.7915865543)',
geom=None)
postgres_engine.execute(ins)
|
the-stack_0_7592 | site = {
'html': {
'head': {
            'title': 'My site'
        },
        'body': {
            'h2': 'My heading will be here',
            'div': 'Probably some kind of block here',
            'p': 'And here is a new paragraph'
}
}
}
def f(word, depth, data, i=1):
    # Recursively look for a key that contains `word`, accepting a match
    # only if it is found within `depth` levels of nesting.
    for is_key in data:
        if word in is_key:
            return data[is_key]
    for is_key in data.values():
        if isinstance(is_key, dict):
            res = f(word, depth, is_key, i+1)
            if res and i < depth:
                return res
ask = input('Key to search for: ')
depth = int(input('Enter the depth: '))
result = f(ask, depth, site)
if result:
    print(result)
else:
    print('There is no such key at this depth') |
the-stack_0_7593 | # pylint: disable=no-name-in-module
# pylint: disable=no-self-argument
import psycopg2
import json
from pydantic import BaseModel, ValidationError
conn = psycopg2.connect(database="digital_breakthrough", user="postgres",
password="4432", host="26.173.145.160", port="5434")
cursor = conn.cursor()
class json_serializable(BaseModel):
data = [{}]
def add_features(self, name, value, id):
self.data[id][name] = value
def add_new_features_item(self):
self.data.append({})
#
# Template helpers for fetching data from the DB
# ----------------------------------------------------------------------------------
#
def get_data(query):
cursor.execute(query)
results = cursor.fetchall()
return results
def jsonify(some_tuple):
results = json.dumps(some_tuple, ensure_ascii=False,
separators=(',', ': '))
return results
#
# ----------------------------------------------------------------------------------
#
# Department employee login
def login_emp(login, password):
logins = get_data("select lower(user_login), user_password from dwh.is_users e")
check = False
for item in logins:
if login == item[0] and password == item[1]:
check = True
if check == True:
return 'success'
else:
return 'wrong'
# Fetch the list of SPO institutions (vocational colleges)
def spo_select():
return(jsonify(get_data(
"select "
"doab.org_id, "
"doab.org_name "
"from dwh.dim_organizations_and_branches doab "
)))
# Fetch the list of training directions (degree programs)
def prof_select():
return(jsonify(get_data(
"select "
"td.training_direction_id, "
"td.training_direction_name "
"from dwh.dim_training_direction td"
)))
# Count the number of students
def student_qnt():
return(jsonify(get_data(
"select "
"count(fe.enrollment_id) "
"from dwh.fct_enrollment fe "
"left join dwh.fct_documents_acceptance fda "
"on fda.document_acceptance_id = fe.document_acceptance_id "
"left join dwh.fct_enrollee fe2 "
"on fe2.enrollee_id = fda.enrollee_id "
"and fe2.student = true"
)))
# Count the number of institutions
def orgs_qnt():
return(jsonify(get_data(
"select count(*) from dwh.dim_organizations_and_branches"
)))
# Pie chart of the number of applicants per year
def piechart():
data = get_data(
"select "
"count(fe.enrollment_id) qnt,"
"extract('year' from fe.enrollment_dttm) acc_year "
"from dwh.fct_enrollment fe "
"group by "
"extract('year' from fe.enrollment_dttm)"
)
json_inf = json_serializable()
object_id = 0
for i in range(0, len(data)):
json_inf.add_features("year", str(int(data[i][1])), i)
json_inf.add_features("students", data[i][0], i)
json_inf.add_new_features_item()
object_id += 1
del json_inf.data[-1]
return(jsonify(json_inf.dict()))
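# For reference (hedged example, not from the original code): with two year groups in
# the database, the structure serialized above looks roughly like
# {"data": [{"year": "2020", "students": 120}, {"year": "2021", "students": 98}]}.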
def checkbox_report():
return(jsonify(get_data(
"select "
"acc_year::varchar(4),"
"org_name,"
"training_direction_name,"
"orphan,"
"disabled,"
"sport_ach "
"from dwh.big_rep "
)))
# List of enrollment years
def years():
return(jsonify(get_data(
"select distinct "
"extract('year' from fe.enrollment_dttm)::varchar(4) "
"from dwh.fct_enrollment fe"
)))
|
the-stack_0_7594 | # Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client side implementation of search criteria.
This module provides some easy factory methods to do common search operations. It also exposes
lower level RPC functionality for procedural searches.
==============
Usage examples
==============
The following example illustrates searching using the high level API::
jobs = getJobs(show=["pipe"])
An example of a procedural search::
s = JobSearch()
s.shows.append("pipe")
s.users.append("chambers")
jobs = s.find()
A procedural example searching by regular expression::
s = JobSearch()
s.includeFinished = True
s.regex.append("blah")
for job in s.find():
print job
Another procedural example::
for job in JobSearch.byUser(["chambers","jwelborn"]):
job.proxy.kill()
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import object
import logging
import six
# pylint: disable=cyclic-import
from opencue.compiled_proto import criterion_pb2
from opencue.compiled_proto import host_pb2
from opencue.compiled_proto import job_pb2
import opencue.wrappers.host
from .cuebot import Cuebot
logger = logging.getLogger("opencue")
__all__ = ["BaseSearch",
"ProcSearch",
"FrameSearch",
"HostSearch",
"JobSearch"]
class BaseSearch(object):
"""Base class for searching."""
def __init__(self, **options):
self.options = options
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.options == other.options
def search(self):
"""Executes the search using the options provided at initiation."""
return self.byOptions(**self.options)
@classmethod
def byOptions(cls, **options):
"""Executes the search using the provided options."""
raise NotImplementedError
class ProcSearch(BaseSearch):
"""Class for searching for procs.
See: help(opencue.getProcs)"""
@staticmethod
def criteriaFromOptions(**options):
"""Constructs a search criteria object for the given options."""
return _setOptions(host_pb2.ProcSearchCriteria(), options)
@classmethod
def byOptions(cls, **options):
"""Executes the search using the given options."""
criteria = cls.criteriaFromOptions(**options)
return Cuebot.getStub('proc').GetProcs(
host_pb2.ProcGetProcsRequest(r=criteria), timeout=Cuebot.Timeout)
class FrameSearch(BaseSearch):
"""Class for searching for frames."""
page = 1
limit = 1000
change_date = 0
@classmethod
def criteriaFromOptions(cls, **options):
"""Constructs a search criteria object for the given options."""
criteria = _setOptions(job_pb2.FrameSearchCriteria(), options)
criteria.page = options.get('page', cls.page)
criteria.limit = options.get('limit', cls.limit)
criteria.change_date = options.get('change_date', cls.change_date)
return criteria
# pylint: disable=arguments-differ
@classmethod
def byOptions(cls, job, **options):
criteria = cls.criteriaFromOptions(**options)
return Cuebot.getStub('frame').GetFrames(job_pb2.FrameGetFramesRequest(job=job, r=criteria),
timeout=Cuebot.Timeout)
@classmethod
def byRange(cls, job, val):
"""Executes a search by frame range."""
cls.byOptions(job, frame_range=val)
class HostSearch(BaseSearch):
"""Class for searching for hosts."""
@staticmethod
def criteriaFromOptions(**options):
"""Constructs a search criteria object for the given options."""
return _setOptions(host_pb2.HostSearchCriteria(), options)
@classmethod
def byOptions(cls, **options):
criteria = cls.criteriaFromOptions(**options)
return [
opencue.wrappers.host.Host(host) for host in Cuebot.getStub('host').GetHosts(
host_pb2.HostGetHostsRequest(r=criteria), timeout=Cuebot.Timeout).hosts.hosts]
@classmethod
def byName(cls, val):
"""Searches for a host by name."""
return cls.byOptions(name=val)
@classmethod
def byRegex(cls, val):
"""Searches for a host by regular expression."""
return cls.byOptions(regex=val)
@classmethod
def byId(cls, val):
"""Searches for a host by id."""
return cls.byOptions(id=val)
@classmethod
def byMatch(cls, val):
"""Searches for a host by substring match."""
return cls.byOptions(substr=val)
@classmethod
def byAllocation(cls, val):
"""Searches for a host by allocation."""
return cls.byOptions(alloc=val)
class JobSearch(BaseSearch):
"""Class for searching for jobs."""
@staticmethod
def criteriaFromOptions(**options):
"""Constructs a search criteria object for the given options."""
return _setOptions(job_pb2.JobSearchCriteria(), options)
@classmethod
def byOptions(cls, **options):
criteria = cls.criteriaFromOptions(**options)
return Cuebot.getStub('job').GetJobs(
job_pb2.JobGetJobsRequest(r=criteria), timeout=Cuebot.Timeout)
@classmethod
def byName(cls, val):
"""Searches for a job by name."""
return cls.byOptions(job=val)
@classmethod
def byId(cls, val):
"""Searches for a job by id."""
return cls.byOptions(id=val)
@classmethod
def byRegex(cls, val):
"""Searches for a job by regex."""
return cls.byOptions(regex=val)
@classmethod
def byMatch(cls, val):
"""Searches for a job by substring match."""
return cls.byOptions(substr=val)
@classmethod
def byShow(cls, val):
"""Searches for a job by show."""
return cls.byOptions(show=val)
@classmethod
def byShot(cls, val):
"""Searches for a job by shot."""
return cls.byOptions(shots=val)
@classmethod
def byUser(cls, val):
"""Searches for a job by user."""
return cls.byOptions(user=val)
def _append(stuff, item):
if isinstance(item, (tuple, list, set)):
stuff.extend(item)
else:
stuff.append(item)
def _createCriterion(search, searchType, convert=None):
"""handleCriterion
returns the proper subclass of FloatSearchCriterion or IntSearchCriterion
based on input from the user. There are a few formats which are accepted.
float/int - GreaterThan[searchType]SearchCriterion
string -
gt<value> - GreaterThan[searchType]SearchCriterion
lt<value> - LessThan[searchType]SearchCriterion
min-max - InRange[searchType]SearchCriterion
:type search: String or Int or Float
:param search: The search desired: 'gt#', 'lt#', '#-#'.
'#' or # is assumed greater than.
:type searchType: Int or Float
:param searchType: The type of search criterion required
:type convert: callable
:param convert: Optional callable to convert the input to the units the
cuebot uses. ie: hours to seconds.
:rtype: SearchCriterion
:return: A SearchCriterion object"""
def _convert(val):
if not convert:
return searchType(val)
return searchType(convert(searchType(val)))
if isinstance(search, (int, float)) or \
isinstance(search, str) and search.isdigit():
search = "gt%s" % search
if searchType == float:
searchTypeStr = "Float"
elif searchType == int:
searchTypeStr = "Integer"
else:
raise ValueError("Unknown searchType, must be Int or Float")
if search.startswith("gt"):
criterion = getattr(criterion_pb2,
"GreaterThan%sSearchCriterion" % searchTypeStr)
return criterion(_convert(search[2:]))
if search.startswith("lt"):
criterion = getattr(criterion_pb2,
"LessThan%sSearchCriterion" % searchTypeStr)
return criterion(_convert(search[2:]))
if search.find("-") > -1:
criterion = getattr(criterion_pb2,
"InRange%sSearchCriterion" % searchTypeStr)
min_range, max_range = search.split("-")
return criterion(_convert(min_range), _convert(max_range))
raise ValueError("Unable to parse this format: %s" % search)
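# Hedged illustration (not part of the original module) of how the formats above parse:
#   _createCriterion(5, int)       -> GreaterThanIntegerSearchCriterion(5)
#   _createCriterion("lt30", int)  -> LessThanIntegerSearchCriterion(30)
#   _createCriterion("1-2", float) -> InRangeFloatSearchCriterion(1.0, 2.0)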
def _raiseIfNotType(searchOption, value, expectedType):
if not isinstance(value, list):
raise TypeError("Failed to set search option: '{}'. Expects type '{}', but got {}.".format(
searchOption, expectedType, type(value)))
def raiseIfNotList(searchOption, value):
"""Raises an exception if the provided value is not a list."""
_raiseIfNotType(searchOption, value, list)
def _setOptions(criteria, options):
for k, v in options.items():
if k == "job" or (k == "name" and isinstance(criteria, job_pb2.JobSearchCriteria)):
raiseIfNotList(k, v)
criteria.jobs.extend(v)
elif k == "host" or (k == "name" and isinstance(criteria, host_pb2.HostSearchCriteria)):
raiseIfNotList(k, v)
criteria.hosts.extend(v)
elif k == "frames" or (k == "name" and isinstance(criteria, job_pb2.FrameSearchCriteria)):
raiseIfNotList(k, v)
criteria.frames.extend(v)
elif k in("match", "substr"):
raiseIfNotList(k, v)
criteria.substr.extend(v)
elif k == "regex":
raiseIfNotList(k, v)
criteria.regex.extend(v)
elif k == "id":
raiseIfNotList(k, v)
criteria.ids.extend(v)
elif k == "show":
raiseIfNotList(k, v)
criteria.shows.extend(v)
elif k == "shot":
raiseIfNotList(k, v)
criteria.shots.extend(v)
elif k == "user":
raiseIfNotList(k, v)
criteria.users.extend(v)
elif k == "state" and isinstance(criteria, job_pb2.FrameSearchCriteria):
raiseIfNotList(k, v)
criteria.states.frame_states.extend(v)
elif k == "state" and isinstance(criteria, host_pb2.HostSearchCriteria):
raiseIfNotList(k, v)
criteria.states.state.extend(v)
elif k == "layer":
raiseIfNotList(k, v)
criteria.layers.extend(v)
elif k == "alloc":
raiseIfNotList(k, v)
criteria.allocs.extend(v)
elif k in ("range", "frames"):
if not v:
continue
if isinstance(criteria.frame_range, six.string_types):
# Once FrameSearch.frameRange is not a string
# this can go away
criteria.frame_range = v
else:
criteria.frame_range.append(_createCriterion(v, int))
elif k == "memory":
if not v:
continue
if isinstance(criteria.memory_range, six.string_types):
# Once FrameSearch.memoryRange is not a string
# this can go away
criteria.memory_range = v
else:
criteria.memory_range.append(
_createCriterion(v, int, lambda mem: (1048576 * mem)))
elif k == "duration":
if not v:
continue
if isinstance(criteria.duration_range, six.string_types):
# Once FrameSearch.durationRange is not a string
# this can go away
criteria.duration_range = v
else:
criteria.duration_range.append(
_createCriterion(v, int, lambda duration: (60 * 60 * duration)))
elif k == "limit":
criteria.max_results = int(v)
elif k == "offset":
criteria.first_result = int(v)
elif k == "include_finished":
criteria.include_finished = v
return criteria
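# Hedged usage sketch (assumes a reachable Cuebot; the allocation name is illustrative).
# Option values flow through _setOptions/_createCriterion above, so list-valued filters
# and criterion strings both work:
#   hosts = HostSearch.byOptions(alloc=["local.general"], memory="gt4")
#   jobs = JobSearch.byOptions(show=["pipe"], user=["chambers"])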
|
the-stack_0_7595 | # qubit number=4
# total number=11
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += CNOT(0,2) # number=8
prog += X(2) # number=9
prog += CNOT(0,2) # number=10
prog += H(3) # number=4
prog += SWAP(1,0) # number=6
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil210.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
the-stack_0_7597 | from . import animal, pet
def main():
clifford = pet.create_pet('Clifford', 'Dog', 12)
    print('This dog is %d years old.' % clifford.get_age())
    print(clifford)
    print(clifford.get_name())
if __name__ == '__main__':
main()
|
the-stack_0_7599 | """Baseline train
- Author: Junghoon Kim
- Contact: [email protected]
"""
import argparse
from datetime import datetime
import os
import yaml
from typing import Any, Dict, Tuple, Union
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models
from pytz import timezone
from src.dataloader import create_dataloader
from src.loss import CustomCriterion, CosineAnnealingWarmupRestarts
from src.model import Model
from src.trainer_kd_wandb import TorchTrainer
from src.utils.common import get_label_counts, read_yaml
from src.utils.macs import calc_macs
from src.utils.torch_utils import check_runtime, model_info
import wandb
class Shufflenet_v205(nn.Module):
def __init__(self, num_classes:int = 9):
super().__init__()
self.model = torchvision.models.shufflenet_v2_x0_5(pretrained=False)
self.model.fc = torch.nn.Linear(1024, num_classes)
def forward(self, x):
return self.model(x)
def train_kd(
teacher_pretrained: str,
model_name: str,
model_config: Dict[str, Any],
student_pretrained: str,
data_config: Dict[str, Any],
log_name: str,
log_dir: str,
fp16: bool,
device: torch.device,
) -> Tuple[float, float, float]:
"""Train."""
# save model_config, data_config
with open(os.path.join(log_dir, 'data.yml'), 'w') as f:
yaml.dump(data_config, f, default_flow_style=False)
with open(os.path.join(log_dir, 'model.yml'), 'w') as f:
yaml.dump(model_config, f, default_flow_style=False)
teacher_model = Shufflenet_v205(num_classes=9)
teacher_pretrained = teacher_pretrained
if os.path.isfile(teacher_pretrained):
teacher_model.load_state_dict(torch.load(teacher_pretrained))
print("teacher pretrained model loaded.")
teacher_model.model.to(device)
model_instance = Model(model_config, verbose=True)
# print(model_instance.model)
# timm.create_model(model_name='resnetv2_101x1_bitm',pretrained=True,num_classes=9)
model_path = os.path.join(log_dir, "best.pt")
print(f"Model save path: {model_path}")
# if there is student pretrained, then load
print(student_pretrained)
print(os.path.isfile(student_pretrained))
if os.path.isfile(student_pretrained):
model_instance.model.load_state_dict(torch.load(student_pretrained, map_location=device))
print("student pretrained model load")
model_instance.model.to(device)
# Create dataloader
train_dl, val_dl, test_dl = create_dataloader(data_config)
# Calc macs
macs = calc_macs(model_instance.model, (3, data_config["IMG_SIZE"], data_config["IMG_SIZE"]))
print(f"macs: {macs}")
    # sglee branch test.
    # sglee487 branch test.
# Create optimizer, scheduler, criterion
# optimizer = torch.optim.SGD(model_instance.model.parameters(), lr=data_config["INIT_LR"], momentum=0.9)
optimizer = torch.optim.AdamW(model_instance.model.parameters(), lr=data_config["INIT_LR"])
# scheduler = torch.optim.lr_scheduler.OneCycleLR(
# optimizer=optimizer,
# max_lr=data_config["INIT_LR"],
# steps_per_epoch=len(train_dl),
# epochs=data_config["EPOCHS"],
# pct_start=0.05,
# )
first_cycle_steps = 4000
scheduler = CosineAnnealingWarmupRestarts(optimizer=optimizer,
first_cycle_steps=first_cycle_steps,
max_lr=data_config["INIT_LR"],
min_lr=0.0000001,
warmup_steps=int(first_cycle_steps * 0.15),
gamma=0.5)
criterion = CustomCriterion(
samples_per_cls=get_label_counts(data_config["DATA_PATH"])
if data_config["DATASET"] == "TACO"
else None,
device=device,
)
# Amp loss scaler
scaler = (
torch.cuda.amp.GradScaler() if fp16 and device != torch.device("cpu") else None
)
# Create trainer
trainer = TorchTrainer(
model_name=model_name,
model=model_instance.model,
model_macs=macs,
log_name=log_name,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
scaler=scaler,
device=device,
model_path=model_path,
verbose=1,
)
wandb.log({"Model/TeacherModel": teacher_model.__class__.__name__})
best_acc, best_f1 = trainer.train(
teacher_model=teacher_model,
train_dataloader=train_dl,
n_epoch=data_config["EPOCHS"],
val_dataloader=val_dl if val_dl else test_dl,
)
# evaluate model with test set
model_instance.model.load_state_dict(torch.load(model_path))
test_loss, test_f1, test_acc = trainer.test(
model=model_instance.model, test_dataloader=val_dl if val_dl else test_dl
)
return test_loss, test_f1, test_acc
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train model.")
parser.add_argument(
"--model", default="configs/model/shufflenetv205_2021-06-08_04-25-40.yaml", type=str, help="model config"
)
parser.add_argument(
"--data", default="configs/data/taco_3_continue.yaml", type=str, help="data config"
)
parser.add_argument(
"--teacher_pretrained", default="expsaves/shufflenet_v2_x0_5_pretrained_2021-06-09_19-14-44/best.pt",
type=str, help="to load student pretrained weight"
)
parser.add_argument(
"--student_pretrained", default="expsaves/shufflenetv205_2021-06-08_04-25-40/best.pt",
type=str, help="to load student pretrained weight"
)
args = parser.parse_args()
model_name = args.model.split('/')[-1].split('.yaml')[0]
model_config = read_yaml(cfg=args.model)
data_config = read_yaml(cfg=args.data)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
now_str = datetime.now(timezone('Asia/Seoul')).strftime("%Y-%m-%d_%H-%M-%S")
log_name = f"{model_name}_{now_str}"
log_dir = os.path.join('exp', f"{model_name}_{now_str}")
os.makedirs(log_dir, exist_ok=True)
wandb.init(project='pstage4', reinit=False, name=log_name)
test_loss, test_f1, test_acc = train_kd(
teacher_pretrained=args.teacher_pretrained,
model_name=model_name,
model_config=model_config,
student_pretrained=args.student_pretrained,
data_config=data_config,
log_name=log_name,
log_dir=log_dir,
fp16=data_config["FP16"],
device=device,
)
|
the-stack_0_7600 | import codecs
import binascii
def xor(bytes, value):
result = []
for i in range(len(bytes)):
result.append(bytes[i] ^ value)
return bytearray(result)
# ascii a-z characters are 97-122, or 0x61-0x7a
# capitals, spaces
# if many characters outside that range, consider it garbage
def englishness(bytes):
count = 0
for i in bytes:
if is_garbage(i):
count+=1
return len(bytes)-count
def is_garbage(byte):
if byte == 32: return False #space
if byte >= 65 and byte <= 90: return False #uppercase
if byte >= 97 and byte <= 122: return False #lowercase
return True
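# Hedged note (not part of the original script): for the 34-byte ciphertext below, the
# correct single-byte key decodes to mostly letters and spaces, so its englishness()
# score is close to 34, while wrong keys usually fall under the score > 20 cutoff.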
input = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
print(input)
bytes = codecs.decode(input, 'hex')
candidates = []
for i in range(255):
decoded = xor(bytes, i)
score = englishness(decoded)
if score > 20:
        decoded_string = ''.join(chr(i) for i in decoded)
        candidates.append((score, decoded_string))
candidates.sort(key = lambda tup: tup[0])
for i in range(len(candidates)):
print(candidates[i][1]) # last one printed is most likely
"""
bytes = xor(bytes, 3)
print(binascii.hexlify(bytearray(bytes)))
bytes = xor(bytes, 3)
print(binascii.hexlify(bytearray(bytes)))
"""
|
the-stack_0_7604 | import re
import math
from ..exception import PygpException
from .vertex import Vertex
from .halfedge import Halfedge
from .face import Face
from .objmesh import ObjMesh
class TriMesh(object):
def __init__(self, filename=''):
self.vertices = []
self.halfedges = []
self.faces = []
self.indices = []
if filename != '':
self.load(filename)
def load(self, filename):
obj = ObjMesh(filename)
unique_vertices = {}
for i in obj.indices:
vx = obj.vertices[i * 3 + 0]
vy = obj.vertices[i * 3 + 1]
vz = obj.vertices[i * 3 + 2]
v = (vx, vy, vz)
if v not in unique_vertices:
unique_vertices[v] = len(self.vertices)
self.vertices.append(Vertex(v[0], v[1], v[2]))
self.vertices[-1].index = unique_vertices[v]
self.indices.append(unique_vertices[v])
self._make_halfedge()
def save(self, filename):
with open(filename, 'w') as fp:
for v in self.vertices:
fp.write('v {0:.6f} {1:.6f} {2:.6f}\n'.format(v.x, v.y, v.z))
for i in range(0, len(self.indices), 3):
i0 = self.indices[i + 0] + 1
i1 = self.indices[i + 1] + 1
i2 = self.indices[i + 2] + 1
fp.write('f {0} {1} {2}\n'.format(i0, i1, i2))
def n_vertices(self):
return len(self.vertices)
def n_faces(self):
return len(self.faces)
def collapse_halfedge(self, v_from, v_to, update_position=None):
if v_from.degree() <= 3 or v_to.degree() <= 3:
raise PygpException('Invalid collapse operation!')
# Find target halfedge
target_halfedge = None
for he in v_from.halfedges():
if he.vertex_from is v_from and he.vertex_to is v_to:
target_halfedge = he
break
if target_halfedge is None:
raise PygpException('Specified halfedge does not exist!')
reverse_halfedge = target_halfedge.opposite
# Update v_to's halfedge
target_halfedge.vertex_to.halfedge = target_halfedge.next.opposite.next
# Update halfedges of surrounding vertices
if target_halfedge.face is not None:
target_halfedge.next.vertex_to.halfedge = target_halfedge.next.opposite
if reverse_halfedge.face is not None:
reverse_halfedge.next.vertex_to.halfedge = reverse_halfedge.next.opposite
# Update topology
if target_halfedge.face is not None:
he0 = target_halfedge.next.opposite
he1 = target_halfedge.next.next.opposite
he0.opposite, he1.opposite = he1, he0
if reverse_halfedge.face is not None:
he2 = reverse_halfedge.next.opposite
he3 = reverse_halfedge.next.next.opposite
he2.opposite, he3.opposite = he3, he2
# Update topology for boundary vertices
if reverse_halfedge.face is None:
for he in target_halfedge.vertex_to.halfedges():
if he.opposite.next is reverse_halfedge:
he.opposite.next = reverse_halfedge.next
break
if target_halfedge.face is None:
for he in reverse_halfedge.vertex_to.halfedges():
if he.opposite.next is target_halfedge:
he.opposite.next = target_halfedge.next
break
for he in target_halfedge.vertex_to.halfedges():
he.vertex_from = target_halfedge.vertex_to
he.opposite.vertex_to = target_halfedge.vertex_to
# Delete/update vertex
self.vertices[target_halfedge.vertex_from.index] = None
if update_position is not None:
self.vertices[target_halfedge.vertex_to.index].position = update_position
# Remove faces
if target_halfedge.face is not None:
self.faces[target_halfedge.face.index] = None
if reverse_halfedge.face is not None:
self.faces[reverse_halfedge.face.index] = None
# Delete halfedge
self.halfedges[target_halfedge.index] = None
self.halfedges[reverse_halfedge.index] = None
def flip_halfedge(self, he):
rev = he.opposite
if rev.face is None:
raise PygpException('Flip method is called for boundary halfedge!')
        # Get surrounding vertices, halfedges and faces
v0 = he.vertex_to
v1 = he.next.vertex_to
v2 = rev.next.vertex_to
v3 = rev.vertex_to
he0 = he.next
he1 = he.next.next
he2 = rev.next.next
he3 = rev.next
f0 = he.face
f1 = rev.face
# Update halfedges of to/from vertices
v0.halfedge = he0
v3.halfedge = he3
# Update halfedge's source and destination
he.vertex_from = v1
he.vertex_to = v2
rev.vertex_from = v2
rev.vertex_to = v1
# Update face circulation
he.next = he2
he2.next = he0
he0.next = he
rev.next = he1
he1.next = he3
he3.next = rev
# Update faces
f0.halfedge = he
he.face = f0
he2.face = f0
he0.face = f0
f1.halfedge = rev
rev.face = f1
he1.face = f1
he3.face = f1
def clean(self):
# Compute new vertex indices
count = 0
new_index_table = [ 0 ] * self.n_vertices()
for i, v in enumerate(self.vertices):
new_index_table[i] = count
if v is not None:
count += 1
# Update vertex array
self.vertices = [ v for v in self.vertices if v is not None ]
for i, v in enumerate(self.vertices):
v.index = i
# Update halfedge array
self.halfedges = [ he for he in self.halfedges if he is not None ]
for i, he in enumerate(self.halfedges):
he.index = i
self.faces = [ f for f in self.faces if f is not None ]
for i, f in enumerate(self.faces):
f.index = i
self.indices = [ -1 ] * (len(self.faces) * 3)
for i, f in enumerate(self.faces):
vs = list(f.vertices())
assert len(vs) == 3
self.indices[i * 3 + 0] = vs[0].index
self.indices[i * 3 + 1] = vs[1].index
self.indices[i * 3 + 2] = vs[2].index
assert vs[0].index < len(self.vertices)
assert vs[1].index < len(self.vertices)
assert vs[2].index < len(self.vertices)
def _make_halfedge(self):
table = [ [] for i in range(len(self.vertices)) ]
self.halfedges.clear()
self.faces.clear()
for i in range(0, len(self.indices), 3):
he0 = Halfedge()
he1 = Halfedge()
he2 = Halfedge()
he0.vertex_from = self.vertices[self.indices[i + 0]]
he1.vertex_from = self.vertices[self.indices[i + 1]]
he2.vertex_from = self.vertices[self.indices[i + 2]]
he0.vertex_to = self.vertices[self.indices[i + 1]]
he1.vertex_to = self.vertices[self.indices[i + 2]]
he2.vertex_to = self.vertices[self.indices[i + 0]]
assert he0.vertex_from.index != he0.vertex_to.index
assert he1.vertex_from.index != he1.vertex_to.index
assert he2.vertex_from.index != he2.vertex_to.index
he0.next = he1
he1.next = he2
he2.next = he0
self.vertices[self.indices[i + 0]].halfedge = he0
self.vertices[self.indices[i + 1]].halfedge = he1
self.vertices[self.indices[i + 2]].halfedge = he2
face = Face()
face.halfedge = he0
he0.face = face
he1.face = face
he2.face = face
self.halfedges.extend([ he0, he1, he2 ])
self.faces.append(face)
table[self.vertices[self.indices[i + 0]].index].append(he0)
table[self.vertices[self.indices[i + 1]].index].append(he1)
table[self.vertices[self.indices[i + 2]].index].append(he2)
# Set opposite halfedges
for he0 in self.halfedges:
for he1 in table[he0.vertex_to.index]:
if he0.vertex_from == he1.vertex_to and \
he1.vertex_from == he0.vertex_to:
he0.opposite = he1
he1.opposite = he0
break
# Opposite halfedge not found
# Mark vertices as border vertices
if he0.opposite is None:
he0.vertex_from.is_boundary = True
he0.vertex_to.is_boundary = True
he1 = Halfedge()
he1.vertex_from = he0.vertex_to
he1.vertex_to = he0.vertex_from
he1.opposite = he0
he0.opposite = he1
he1.vertex_from.halfedge = he1
self.halfedges.append(he1)
# Process border vertices
for v in self.vertices:
if v.is_boundary:
he = v.halfedge
while True:
if he.opposite.next is None:
he.opposite.next = v.halfedge
break
he = he.opposite.next
for i, he in enumerate(self.halfedges):
he.index = i
for i, f in enumerate(self.faces):
f.index = i
def verify(self):
for v in self.vertices:
if v is None:
continue
if v.index < 0:
return False
if v.halfedge is None:
return False
for he in self.halfedges:
if he is None:
continue
if he.index < 0:
return False
if he.vertex_from is None or he.vertex_to is None:
return False
if he.next is None:
return False
if he.opposite is None:
return False
if he.face is None:
return False
for f in self.faces:
if f is None:
continue
if f.index < 0:
return False
if f.halfedge is None:
return False
return True
def clear(self):
self.vertices.clear()
self.halfedges.clear()
self.faces.clear()
self.indices.clear()
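# Hedged usage sketch (not part of the original module); it assumes an OBJ file of a
# triangle mesh is available at the given path and that the relative imports resolve.
#
#   mesh = TriMesh('input.obj')
#   he = mesh.halfedges[0]
#   if he.face is not None and he.opposite.face is not None:
#       mesh.flip_halfedge(he)   # flip an interior edge
#   mesh.clean()
#   assert mesh.verify()
#   mesh.save('output.obj')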
|
the-stack_0_7605 | # From https://github.com/yunjey/stargan-v2-demo/tree/master/core
import torch
import torch.nn as nn
class MappingNetwork(nn.Module):
"""Mapping network: (latent z, domain y) -> (style s)."""
def __init__(self, latent_dim=64, style_dim=64, num_domains=2):
super(MappingNetwork, self).__init__()
self.num_domains = num_domains
hidden_dim = 512
self.shared = nn.Sequential(
nn.Linear(latent_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU())
self.unshared = nn.ModuleList()
for _ in range(num_domains):
self.unshared.append(
nn.Linear(hidden_dim, style_dim))
def forward(self, z, y):
"""
Inputs:
- z: latent vectors of shape (batch, latent_dim).
- y: domain labels of shape (batch).
Output:
- s: style vectors of shape (batch, style_dim).
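        Shape example (illustrative): z of shape (8, 64) and y of shape (8,)
        with values in [0, num_domains) give s of shape (8, 64).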
"""
#z = z / torch.norm(z, p=2, dim=1, keepdim=True)
#z = z / (torch.sqrt(torch.mean(z**2, dim=1, keepdim=True)) + 1e-8)
h = self.shared(z)
outs = []
for i in range(self.num_domains):
out = self.unshared[i](h) # (batch, style_dim)
outs.append(out)
out = torch.stack(outs, dim=1) # (batch, num_domains, style_dim)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
s = out[idx, y] # (batch, style_dim)
#print('F_s: ', torch.mean(torch.var(s, dim=0, unbiased=False)))
return s |
the-stack_0_7606 | import numpy as np
np.random.seed(0)
class Initialization:
zeros_initialization = None
def _zeros_initialization(n_units: int, n_in: int):
W = np.zeros((n_units, n_in))
b = np.zeros((n_units, 1))
return W, b
def _weights_initialization(n_units, n_in):
        # Multiplying W by a small number keeps the initial activations small and makes learning fast.
        # In practice, though, scaling by 0.01 in networks with more than two layers (l > 2) can keep the
        # NN from converging, because it runs into the vanishing-gradients problem.
W = np.random.randn(n_units, n_in) * 0.01
b = np.zeros((n_units, 1))
return W, b
    def _He_initialization(n_units, n_in):
        """Works well with ReLU (a generalization of this initializer is known as variance_scaling_initializer).
:param n_units:
:param n_in:
:return:
"""
W = np.random.randn(n_units, n_in) * np.sqrt(2 / n_in)
b = np.zeros((n_units, 1))
return W, b
def _Xavier_initialization(n_units, n_in):
"""Initialize weight W using Xavier Initialization (also known as Glorot Initialization)
        If the input features/activations have roughly zero mean and unit variance, then z takes on a
        similar scale. This doesn't solve the vanishing/exploding-gradients problem, but it definitely
        helps reduce it, because it keeps each weight matrix W from being much bigger or much smaller
        than 1, so the signal doesn't explode or vanish too quickly.
        P.S. Works well with sigmoid, softmax and tanh.
"""
W = np.random.randn(n_units, n_in) * np.sqrt(1 / n_in)
b = np.zeros((n_units, 1))
return W, b
def _Benjio_initialization(n_units, n_in):
W = np.random.randn(n_units, n_in) * np.sqrt(2 / (n_in + n_units))
b = np.zeros((n_units, 1))
return W, b
if __name__ == '__main__':
pass
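    # Hedged usage sketch (not part of the original file): He initialization for a
    # 4-unit ReLU layer that receives 3 inputs.
    W_he, b_he = Initialization._He_initialization(n_units=4, n_in=3)
    print(W_he.shape, b_he.shape)  # (4, 3) (4, 1)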
|
the-stack_0_7607 | from cryptography.exceptions import InvalidSignature, InvalidKey
from django.core.handlers.wsgi import WSGIRequest
from django.http import JsonResponse
from pyattest.exceptions import PyAttestException, InvalidNonceException, InvalidCertificateChainException, \
InvalidKeyIdException, ExtensionNotFoundException
from dreiattest.exceptions import DreiAttestException, UnsupportedEncryptionException, NoKeyForSessionException
relevant_base = (PyAttestException, DreiAttestException, InvalidSignature, InvalidKey)
nonce_mismatch = (InvalidNonceException,)
invalid_key = (InvalidCertificateChainException, InvalidKeyIdException, UnsupportedEncryptionException,
ExtensionNotFoundException, InvalidSignature, InvalidKey, NoKeyForSessionException)
class HandleDreiattestExceptionsMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
return self.get_response(request)
def process_exception(self, request: WSGIRequest, exception: Exception):
if isinstance(exception, relevant_base):
return self.handle(request, exception)
def handle(self, request: WSGIRequest, exception: Exception):
code = exception.__class__.__name__
if code.endswith('Exception'):
code = code[:-9]
response = JsonResponse(data={'code': code}, status=403)
response['Dreiattest-error'] = self.get_header(exception)
return response
def get_header(self, exception: Exception) -> str:
""" Set some custom headers for the mobile clients. """
if isinstance(exception, nonce_mismatch):
return 'dreiAttest_nonce_mismatch'
if isinstance(exception, invalid_key):
return 'dreiAttest_invalid_key'
return 'dreiAttest_policy_violation'
|
the-stack_0_7608 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
# TODO: improve compliance with handlebars spec and split in separate module (initially based on pybars, but need to change)
from functools import partial
import re
from netforce.pymeta.grammar import OMeta
import collections
from netforce import database
from netforce.model import get_model, fields, BrowseList
from netforce.locale import _
import datetime
import time
from netforce import access
import json
import tempfile
try:
import barcode
from barcode.writer import ImageWriter
except:
barcode = None
print("WARNING: pyBarcode not installed")
import math
import os
from pprint import pprint
from xml.sax import saxutils
handlebars_grammar = r"""
template ::= (<text> | <templatecommand>)*:body => ['template'] + body
text ::= (~(<start>) <anything>)+:text => ('literal', ''.join(text))
other ::= <anything>:char => ('literal', char)
templatecommand ::= <blockrule>
| <comment>
| <escapedexpression>
| <expression>
| <partial>
start ::= '{' '{'
finish ::= '}' '}'
comment ::= <start> '!' (~(<finish>) <anything>)* <finish> => ('comment', )
space ::= ' '|'\t'|'\r'|'\n'
arguments ::= (<space>+ (<kwliteral>|<literal>|<path>))*:arguments => arguments
expression_inner ::= <spaces> <path>:p <arguments>:arguments <spaces> <finish> => (p, arguments)
expression ::= <start> '{' <expression_inner>:e '}' => ('expand', ) + e
| <start> '&' <expression_inner>:e => ('expand', ) + e
escapedexpression ::= <start> <expression_inner>:e => ('escapedexpand', ) + e
block_inner ::= <spaces> <symbol>:s <arguments>:args <spaces> <finish>
=> (''.join(s), args)
alt_inner ::= <spaces> ('^' | 'e' 'l' 's' 'e') <spaces> <finish>
partial ::= <start> '>' <block_inner>:i => ('partial',) + i
path ::= ~('/') <pathseg>+:segments => ('path', segments)
kwliteral ::= <symbol>:s '=' (<literal>|<path>):v => ('kwparam', s, v)
literal ::= (<string>|<integer>|<boolean>):thing => ('literalparam', thing)
string ::= '"' <notquote>*:ls '"' => '"' + ''.join(ls) + '"'
integer ::= <digit>+:ds => int(''.join(ds))
boolean ::= <false>|<true>
false ::= 'f' 'a' 'l' 's' 'e' => False
true ::= 't' 'r' 'u' 'e' => True
notquote ::= <escapedquote> | (~('"') <anything>)
escapedquote ::= '\\' '"' => '\\"'
symbol ::= ~<alt_inner> '['? (<letterOrDigit>|'-'|'@')+:symbol ']'? => ''.join(symbol)
pathseg ::= <symbol>
| '/' => ''
| ('.' '.' '/') => '__parent'
| '.' => ''
pathfinish :expected ::= <start> '/' <path>:found ?(found == expected) <finish>
symbolfinish :expected ::= <start> '/' <symbol>:found ?(found == expected) <finish>
blockrule ::= <start> '#' <block_inner>:i
<template>:t <alttemplate>:alt_t <symbolfinish i[0]> => ('block',) + i + (t, alt_t)
| <start> '^' <block_inner>:i
<template>:t <symbolfinish i[0]> => ('invertedblock',) + i + (t,)
alttemplate ::= (<start> <alt_inner> <template>)?:alt_t => alt_t or []
"""
compile_grammar = """
compile ::= <prolog> <rule>* => builder.finish()
prolog ::= "template" => builder.start()
compile_block ::= <prolog_block> <rule>* => builder.finish_block()
prolog_block ::= "template" => builder.start_block()
rule ::= <literal>
| <expand>
| <escapedexpand>
| <comment>
| <block>
| <invertedblock>
| <partial>
block ::= [ "block" <anything>:symbol [<arg>*:arguments] [<compile_block>:t] [<compile_block>?:alt_t] ] => builder.add_block(symbol, arguments, t, alt_t)
comment ::= [ "comment" ]
literal ::= [ "literal" :value ] => builder.add_literal(value)
expand ::= [ "expand" <path>:value [<arg>*:arguments]] => builder.add_expand(value, arguments)
escapedexpand ::= [ "escapedexpand" <path>:value [<arg>*:arguments]] => builder.add_escaped_expand(value, arguments)
invertedblock ::= [ "invertedblock" <anything>:symbol [<arg>*:arguments] [<compile>:t] ] => builder.add_invertedblock(symbol, arguments, t)
partial ::= ["partial" <anything>:symbol [<arg>*:arguments]] => builder.add_partial(symbol, arguments)
path ::= [ "path" [<pathseg>:segment]] => ("simple", segment)
| [ "path" [<pathseg>+:segments] ] => ("complex", 'resolve(context, "' + '","'.join(segments) + '")' )
simplearg ::= [ "path" [<pathseg>+:segments] ] => 'resolve(context, "' + '","'.join(segments) + '")'
| [ "literalparam" <anything>:value ] => str(value)
arg ::= [ "kwparam" <anything>:symbol <simplearg>:a ] => str(symbol) + '=' + a
| <simplearg>
pathseg ::= "/" => ''
| "." => ''
| "" => ''
| "this" => ''
pathseg ::= <anything>:symbol => ''.join(symbol)
"""
compile_grammar = compile_grammar.format()
class strlist(list):
def __str__(self):
return ''.join(self)
def grow(self, thing):
if type(thing) == str:
self.append(thing)
else:
for element in thing:
self.grow(element)
_map = {
    '&': '&amp;',
    '"': '&quot;',
    "'": '&#x27;',
    '`': '&#x60;',
    '<': '&lt;',
    '>': '&gt;',
}
def substitute(match, _map=_map):
return _map[match.group(0)]
_escape_re = re.compile(r"&|\"|'|`|<|>")
def escape(something, _escape_re=_escape_re, substitute=substitute):
return _escape_re.sub(substitute, something)
class Scope:
def __init__(self, context, parent, data=None):
self.context = context
self.parent = parent
if parent and isinstance(parent,Scope):
self.data=parent.data
else:
self.data={}
if data:
self.data.update(data)
def get(self, name, default=None):
if name == '__parent':
return self.parent
elif name == 'this':
return self.context
elif name.startswith("@"):
return self.data.get(name[1:])
result = self.context.get(name, self)
if result is not self:
return result
return default
__getitem__ = get
def __str__(self):
return str(self.context)
def resolve(context, *segments):
# print("resolve",segments)
for segment in segments:
if context is None:
return None
if segment in (None, ""):
continue
if type(context) in (list, tuple):
offset = int(segment)
try:
context = context[offset]
except:
context = None
else:
if isinstance(segment, str) and segment.isdigit():
segment = int(segment)
context = context.get(segment)
return context
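# Hedged illustration (not in the original module) of how resolve() walks a context:
#   resolve({"order": {"lines": [{"qty": 3}]}}, "order", "lines", "0", "qty") == 3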
def _paginate(this, options, data, limit=None, offset=None, url=None):
if not data:
return options['inverse'](this)
if limit is None:
limit = 10
if offset is None:
offset = 0
count = len(data)
page_no = math.floor(offset / limit) + 1
num_pages = math.floor((count + limit - 1) / limit)
paginate = {
"data": data[offset:offset + limit],
"limit": limit,
"offset": offset,
"count": count,
"item_first": offset + 1,
"item_last": min(offset + limit, count),
"page_no": page_no,
"num_pages": num_pages,
"parts": [],
}
if url:
base_url = re.sub("&offset=\d+", "", url) # XXX
else:
base_url = ""
if base_url.find("?")==-1: # XXX
base_url+="?"
if page_no > 1:
p = page_no - 1
o = (p - 1) * limit
paginate["previous"] = {
"page_no": p,
"url": base_url + "&offset=%d" % o if base_url else None,
}
if page_no < num_pages:
p = page_no + 1
o = (p - 1) * limit
paginate["next"] = {
"page_no": p,
"url": base_url + "&offset=%d" % o if base_url else None,
}
if num_pages > 1:
first_part_page_no = max(1, page_no - 2)
last_part_page_no = min(num_pages, page_no + 1)
for p in range(first_part_page_no, last_part_page_no + 1):
o = (p - 1) * limit
part = {
"page_no": p,
"active": p == page_no,
"url": base_url + "&offset=%d" % o if base_url else None,
}
paginate["parts"].append(part)
scope = Scope({"paginate": paginate}, this)
return options['fn'](scope)
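# For reference (hedged example, not from the original code): with count=25, limit=10 and
# offset=10, the dict above exposes page_no=2, num_pages=3, item_first=11, item_last=20,
# "previous"/"next" entries, and a "parts" list covering pages 1-3.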
def _each(this, options, context, order=None, offset=None, limit=None):
if not context:
return None
result = strlist()
i = 0
if order:
if len(order.split(" ")) == 2:
if order.split(" ")[1] == "desc":
context2 = sorted(context, key=lambda x: x[order.split(" ")[0]])[::-1]
else:
context2 = sorted(context, key=lambda x: x[order])
else:
context2 = context
if offset:
context2=context2[offset:]
if limit:
context2=context2[:limit]
for ctx in context2:
data={}
if isinstance(context2, (list, BrowseList)):
data['index'] = i
data['item_no'] = i+1
data['is_last'] = i == len(context2) - 1
if isinstance(context2, dict):
data['key'] = ctx
scope = Scope(ctx, this, data=data)
result.grow(options['fn'](scope))
i += 1
return result
def _if(this, options, context):
if isinstance(context, collections.Callable):
context = context(this)
if context:
return options['fn'](this)
else:
return options['inverse'](this)
def _log(this, context):
log(context)
def _unless(this, options, context):
if not context:
return options['fn'](this)
def _blockHelperMissing(this, options, context):
if isinstance(context, collections.Callable):
context = context(this)
if context != "" and not context:
return options['inverse'](this)
if type(context) in (list, strlist, tuple):
        return _each(this, options, context)
if context is True:
callwith = this
else:
callwith = context
return options['fn'](callwith)
def _helperMissing(scope, name, *args):
if not args:
return None
raise Exception("Could not find property %s" % (name,))
def _with(this, options, context):
if context:
scope = Scope(context, this)
return options['fn'](scope)
else:
return options['inverse'](this)
def _file_path(this, context, thumbnail=None):
if context is None:
return ""
try:
dbname = database.get_active_db()
if thumbnail:
basename, ext = os.path.splitext(context)
basename2, _, rand = basename.rpartition(",")
fname = basename2 + "-resize-256," + rand + ext
else:
fname = context
return "/static/db/" + dbname + "/files/" + fname
except:
return ""
def _currency(this, context, nogroup=False, zero=None):
if context is None:
return ""
try:
val = float(context) # in case string
if zero is not None and abs(val) < 0.0001:
return zero
val = "{:0,.2f}".format(val)
if nogroup:
val = val.replace(",", "")
return val
except:
return ""
def _compare(this, options, val1, val2, operator="="):
if operator == "=":
res = val1 == val2
    elif operator == "!=":
        res = val1 != val2
elif operator == "<=":
res = val1 <= val2
elif operator == ">=":
res = val1 >= val2
elif operator == "<":
res = val1 < val2
elif operator == ">":
res = val1 > val2
elif operator == "in":
res = val1 in val2
elif operator == "not in":
res = val1 not in val2
else:
raise Exception("Invalid operator: '%s'" % operator)
if res:
return options['fn'](this)
else:
return options['inverse'](this)
def _ifeq(this, options, val1, val2):
if val1 == val2:
return options['fn'](this)
else:
return options['inverse'](this)
def _change_lang_url(this, lang): # FIXME
return "/ecom_index?set_lang=%s" % lang
def _if_match(this, options, val, pattern):
if not val:
val = ""
exp = pattern.replace("%", ".*")
if re.match(exp, val):
return options['fn'](this)
else:
return options['inverse'](this)
def _first(this, options, items):
if not items:
return ""
item = items[0]
return options['fn'](item)
def _after_first(this, options, items):
html = strlist()
for item in items[1:]:
html.grow(options["fn"](item))
return html
def _translate(this, val):
return _(val)
def _padding(this, val):
if not val:
return ""
return "-" + " " * int(val / 10) # XXX
def remove_zeros(s):
z = 0
while s[-1 - z] == "0":
z += 1
if s[-1 - z] == ".":
z += 1
if z:
s = s[:-z]
return s
def _fmt_ths_qty(this, val):
if val is None:
return ""
return "{:0,.0f}".format(val)
def _fmt_qty(this, val):
if val is None:
return ""
try:
val = float(val) # in case string
return remove_zeros("%.6f" % val)
except:
return "ERR"
def _fmt_number(this, val):
if val is None:
return ""
try:
val = float(val) # in case string
return remove_zeros("%.6f" % val)
except:
return "ERR"
def _filename(this, val):
if val is None:
return ""
try:
name, ext = os.path.splitext(val)
name2 = name.rsplit(",")[0]
return name2 + ext
except:
return val
def _lookup(this, o, *inds):
v = resolve(o, *inds)
if not v:
return ""
return str(v)
def _if_lookup(this, options, o, *inds):
v = resolve(o, *inds)
if v:
return options['fn'](this)
else:
return options['inverse'](this)
def _unless_lookup(this, options, o, *inds):
v = resolve(o, *inds)
if not v:
return options['fn'](this)
def _length(this, val):
if val is None:
return ""
return len(val)
def _unless_eq(this, options, val1, val2):
if val1 != val2:
return options['fn'](this)
def _ldelim(this):
return "{{"
def _rdelim(this):
return "}}"
def _fmt_date(this, val, fmt=None):
if not val:
return None
try:
d = datetime.datetime.strptime(val[:10], "%Y-%m-%d")
settings = get_model("settings").browse(1) # FIXME: speed
if not fmt:
fmt = settings.date_format
if fmt:
fmt = fmt.replace("YYYY", "%Y").replace("MM", "%m").replace("DD", "%d")
else:
fmt = "%Y-%m-%d"
s = d.strftime(fmt)
except:
print("Cannot convert date format for %s" % val)
s = val
return s
def _fmt_datetime(this, val, fmt=None):
if not val:
return None
try:
d = datetime.datetime.strptime(val, "%Y-%m-%d %H:%M:%S")
settings = get_model("settings").browse(1) # FIXME: speed
if not fmt:
fmt = settings.date_format
if fmt:
fmt = fmt.replace("YYYY", "%Y").replace("MM", "%m").replace("DD", "%d")
else:
fmt = "%Y-%m-%d"
fmt+=" %H:%M:%S"
s = d.strftime(fmt)
except:
print("Cannot convert datetime format for %s" % val)
s = val
return s
def _fmt_bool(this, val):
if val:
return "Yes"
return "No"
def _col_if(this, val):
if val:
return ""
else:
return "[[HIDE_COL]]"
if barcode:
class NFBarWriter(ImageWriter):
def calculate_size(self, *args, **kw):
self.text = "" # XXX
if self.custom_module_height:
self.module_height = self.custom_module_height
return ImageWriter.calculate_size(self, *args, **kw)
def _barcode(this, val, height=None, type="code128", add_checksum=False):
if not barcode:
return ""
if not val:
return ""
try:
bar_cls = barcode.get_barcode_class(type)
writer = NFBarWriter()
writer.custom_module_height = height
if type == "code39":
bar = bar_cls(str(val), writer=writer, add_checksum=add_checksum)
else:
bar = bar_cls(str(val), writer=writer)
_, fname = tempfile.mkstemp(suffix=".png", prefix="barcode-")
fullname = bar.save(fname.replace(".png", ""))
return fullname
except Exception as e:
print("WARNING: failed to generate barcode: %s (%s)" % (val, e))
return ""
_acc_bal_cache = {}
def get_all_balances(date_from=None, date_to=None, track1=None, track2=None):
t = time.time()
k = (date_from, date_to, track1, track2)
if k in _acc_bal_cache:
res, res_t = _acc_bal_cache[k]
if t - res_t <= 10:
print("cache hit", k)
return res
print("cache miss", k)
if track1:
res = get_model("account.track.categ").search([["code", "=", track1]])
if not res:
raise Exception("Invalid tracking category: %s" % track1)
track_id = res[0]
else:
track_id = None
if track2:
res = get_model("account.track.categ").search([["code", "=", track2]])
if not res:
raise Exception("Invalid tracking category: %s" % track2)
track2_id = res[0]
else:
track2_id = None
ctx = {
"date_from": date_from,
"date_to": date_to,
"track_id": track_id,
"track2_id": track2_id,
}
res = get_model("account.account").search_read([["type", "!=", "view"]], ["code", "balance"], context=ctx)
_acc_bal_cache[k] = (res, t)
return res
def _acc_balance(this, acc_from=None, acc_to=None, date_from=None, date_to=None, track1=None, track2=None, negate=False):
print("_acc_balance", acc_from, acc_to, date_from, date_to, track1, track2)
res = get_all_balances(date_from=date_from, date_to=date_to, track1=track1, track2=track2)
bal = 0
for r in res:
if r["code"] >= acc_from and r["code"] <= acc_to:
bal += r["balance"]
if negate:
return -bal
return bal
def _editable_field(this, name, text_only=False):
obj = this.context
model = obj["_model"]
m = get_model(model)
f = m._fields[name]
val = obj[name] # XXX
if isinstance(f, fields.Char):
field_type = "char"
elif isinstance(f, fields.Text):
field_type = "text"
elif isinstance(f, fields.Float):
field_type = "float"
else:
raise Exception("Unsupported editable field: %s.%s" % (model, name))
html = '<div class="nf-editable" data-model="%s" data-field="%s" data-type="%s" data-id="%s"' % (
model, name, field_type, obj["id"])
if text_only:
html += ' data-text-only="1"'
html += '>%s</div>' % val
return html
def _editable_block(this, options, name, page_id=None, post_id=None):
block = get_model("cms.block").get_block(name, page_id=page_id, post_id=post_id)
if block:
out = '<div class="nf-editable" data-model="cms.block" data-field="html" data-type="text" data-id="%s">%s</div>' % (
block["id"], block["html"])
else:
html = options['fn'](this)
defaults = {
"name": name,
}
if page_id:
defaults["page_id"] = page_id
if post_id:
defaults["post_id"] = post_id
out = '<div class="nf-editable" data-model="cms.block" data-field="html" data-type="text" data-defaults=\'%s\'>%s</div>' % (
json.dumps(defaults), html)
return out
def _if_perm(this, options, perm):
if access.check_permission_other(perm):
return options['fn'](this)
else:
return options['inverse'](this)
def _odt_linebreak(this, val):
if val is None:
return ""
val = str(val)
val = saxutils.escape(val)
return val.replace("\n", "<text:line-break></text:line-break>")
_globals_ = {
'helpers': {
'blockHelperMissing': _blockHelperMissing,
'paginate': _paginate,
'each': _each,
'if': _if,
'helperMissing': _helperMissing,
'log': _log,
'unless': _unless,
'with': _with,
"file_path": _file_path,
"currency": _currency,
"change_lang_url": _change_lang_url,
'compare': _compare,
'ifeq': _ifeq,
'if_match': _if_match,
't': _translate,
'padding': _padding,
'fmt_qty': _fmt_qty,
'fmt_ths_qty': _fmt_ths_qty,
'fmt_number': _fmt_number,
'fmt_date': _fmt_date,
'fmt_datetime': _fmt_datetime,
'fmt_bool': _fmt_bool,
'filename': _filename,
'first': _first,
'after_first': _after_first,
"lookup": _lookup,
"if_lookup": _if_lookup,
"unless_lookup": _unless_lookup,
"length": _length,
"unless_eq": _unless_eq,
"ldelim": _ldelim,
"rdelim": _rdelim,
"col_if": _col_if,
#"acc_balance": _acc_balance, # XXX: move this
"editable_field": _editable_field,
"editable_block": _editable_block,
"if_perm": _if_perm,
"barcode": _barcode,
"odt_linebreak": _odt_linebreak,
},
}
def register_helper(name,func):
_globals_["helpers"][name]=func
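# Illustrative sketch (not from the original code): custom helpers follow the same calling
# convention as the built-ins above, i.e. the current scope is passed as the first argument.
# The helper name "upper" below is only an example.
#
#   register_helper("upper", lambda this, value: str(value).upper() if value is not None else "")
#
# The helper is then available in templates as {{upper some_field}}.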
class CodeBuilder:
def __init__(self):
self.stack = []
self.blocks = {}
def start(self):
self._result = strlist()
self.stack.append((self._result, "render"))
self._result.grow("def render(context, helpers=None, partials=None):\n")
self._result.grow(" result = strlist()\n")
self._result.grow(" _helpers = dict(_globals_['helpers'])\n")
self._result.grow(" if helpers is not None: _helpers.update(helpers)\n")
self._result.grow(" helpers = _helpers\n")
self._result.grow(" if partials is None: partials = {}\n")
def finish(self):
self._result.grow(" return result\n")
source = "from netforce.hbs_compiler import strlist,escape,Scope,partial,_globals_,resolve\n\n"
for name, lines in reversed(sorted(self.blocks.items())):
source += "".join(lines) + "\n"
lines = self._result
source += "".join(lines)
return source
def start_block(self):
name = "render_block%d" % len(self.blocks)
self._result = strlist()
self.blocks[name] = self._result
self.stack.append((self._result, name))
self._result.grow("def %s(context, helpers=None, partials=None):\n" % name)
self._result.grow(" result = strlist()\n")
self._result.grow(" _helpers = dict(_globals_['helpers'])\n")
self._result.grow(" if helpers is not None: _helpers.update(helpers)\n")
self._result.grow(" helpers = _helpers\n")
self._result.grow(" if partials is None: partials = {}\n")
def finish_block(self):
self._result.grow(" return result\n")
name = self.stack.pop(-1)[1]
self._result = self.stack and self.stack[-1][0]
return name
def add_block(self, symbol, arguments, name, alt_name):
call = self.arguments_to_call(arguments)
self._result.grow([
" options = {'fn': %s}\n" % name,
" options['helpers'] = helpers\n"
" options['partials'] = partials\n"
])
if alt_name:
self._result.grow([" options['inverse'] = %s\n" % alt_name])
else:
self._result.grow([
" options['inverse'] = lambda this: None\n"
])
self._result.grow([
" value = helper = helpers.get('%s')\n" % symbol,
" if value is None:\n"
" value = context.get('%s')\n" % symbol,
" if helper and callable(helper):\n"
" this = Scope(context, context)\n"
" value = value(this, options, %s\n" % call,
" else:\n"
" helper = helpers['blockHelperMissing']\n"
" value = helper(context, options, value)\n"
" if value is None: value = ''\n"
" result.grow(value)\n"
])
def add_literal(self, value):
self._result.grow(" result.append(%r)\n" % value)
def _lookup_arg(self, arg):
if not arg:
return "context"
return arg
def arguments_to_call(self, arguments):
params = list(map(self._lookup_arg, arguments))
return ", ".join(params) + ")"
def find_lookup(self, path, path_type, call):
if path and path_type == "simple": # simple names can reference helpers.
# TODO: compile this whole expression in the grammar; for now,
# fugly but only a compile time overhead.
# XXX: just rm.
realname = path.replace('.get("', '').replace('")', '')
self._result.grow([
" value = helpers.get('%s')\n" % realname,
" if value is None:\n"
" value = resolve(context, '%s')\n" % path,
])
elif path_type == "simple":
realname = None
self._result.grow([
" value = resolve(context, '%s')\n" % path,
])
else:
realname = None
self._result.grow(" value = %s\n" % path)
self._result.grow([
" if callable(value):\n"
" this = Scope(context, context)\n"
" value = value(this, %s\n" % call,
])
if realname:
self._result.grow(
" elif value is None:\n"
" this = Scope(context, context)\n"
" value = helpers.get('helperMissing')(this, '%s', %s\n"
% (realname, call)
)
self._result.grow(" if value is None: value = ''\n")
def add_escaped_expand(self, path_type_path, arguments):
(path_type, path) = path_type_path
call = self.arguments_to_call(arguments)
self.find_lookup(path, path_type, call)
self._result.grow([
" if type(value) is not strlist:\n",
" value = escape(str(value))\n",
" result.grow(value)\n"
])
def add_expand(self, path_type_path, arguments):
(path_type, path) = path_type_path
call = self.arguments_to_call(arguments)
self.find_lookup(path, path_type, call)
self._result.grow([
" if type(value) is not strlist:\n",
" value = str(value)\n",
" result.grow(value)\n"
])
def _debug(self):
self._result.grow(" import pdb;pdb.set_trace()\n")
def add_invertedblock(self, symbol, arguments, name):
self._result.grow([
" value = context.get('%s')\n" % symbol,
" if not value:\n"
" "])
self._invoke_template(name, "context")
def _invoke_template(self, fn_name, this_name):
self._result.grow([
" result.grow(",
fn_name,
"(",
this_name,
", helpers=helpers, partials=partials))\n"
])
def add_partial(self, symbol, arguments):
if arguments:
assert len(arguments) == 1, arguments
arg = arguments[0]
else:
arg = ""
self._result.grow([
" inner = partials['%s']\n" % symbol,
" scope = Scope(%s, context)\n" % self._lookup_arg(arg)])
self._invoke_template("inner", "scope")
class Compiler:
_handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
_builder = CodeBuilder()
_compiler = OMeta.makeGrammar(compile_grammar, {'builder': _builder})
def __init__(self):
self._helpers = {}
def compile(self, source):
self._builder.stack = []
self._builder.blocks = {}
print("compile step 1...")
tree, err = self._handlebars(source).apply('template')
if err.error:
raise Exception(err.formatError(source))
print("compile step 2...")
code, err = self._compiler(tree).apply('compile')
if err.error:
raise Exception(err.formatError(tree))
return code
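# Illustrative usage sketch (the template string is a made-up example): compiling a Handlebars
# source yields Python source code defining render(context, helpers=None, partials=None).
#
#   compiler = Compiler()
#   code = compiler.compile('Hello {{name}}!')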
|
the-stack_0_7612 | from mie2c.e2c import Encoder, Decoder, Transition, LinearTransition, PWATransition
import torch
from torch import nn
def get_bounce_encoder(dim_in, dim_z):
channels_enc = [6, 32, 32, 16, 16]
ff_shape = [128, 128, 128]
conv_activation = torch.nn.ReLU()
ff_activation = torch.nn.ReLU()
n_channels = len(channels_enc) - 1
kernel_enc = [5, 3, 5, 3, 5]
stride= [2, 1, 2, 1, 2]
padding= [2, 1, 2, 1, 2]
pool = [None, 2, None, 2, 2]
return Encoder(dim_in, dim_z, channels_enc, ff_shape, kernel_enc, stride, padding, pool, conv_activation=conv_activation, ff_activation=ff_activation)
def get_bounce_decoder(dim_in, dim_out):
channels_dec = [6, 32, 32, 16, dim_out[0]]
ff_shape = [128, 128, 128]
conv_activation = torch.nn.ReLU()
ff_activation = torch.nn.ReLU()
n_channels = len(channels_dec) - 1
kernel_dec = [5, 3, 5, 3, 5]
stride = [1, 1, 1, 1, 2]
padding = [2, 1, 2, 1, 2]
return Decoder(dim_in, dim_out, channels_dec, ff_shape, kernel_dec, stride, padding, ff_activation=ff_activation, conv_activation=conv_activation)
def get_bounce_transition(dim_z, dim_u):
nn_width = 32
trans = nn.Sequential(
nn.Linear(dim_z, nn_width),
nn.BatchNorm1d(nn_width),
nn.ReLU(),
nn.Linear(nn_width, nn_width),
nn.BatchNorm1d(nn_width),
nn.ReLU(),
nn.Linear(nn_width, dim_z*2)
)
return Transition(trans, dim_z, dim_u)
def get_bounce_linear_transition(dim_z, dim_u, low_rank=True):
A = torch.nn.Parameter(2. * (torch.randn(dim_z, dim_z) - .5))
r = torch.nn.Parameter(2. * (torch.randn(dim_z) - .5))
v = torch.nn.Parameter(2. * (torch.randn(dim_z) - .5))
B = torch.nn.Parameter(2. * (torch.randn(dim_z, dim_u) - .5))
o = torch.nn.Parameter(2. * (torch.randn(dim_z, 1) - .5))
return LinearTransition(dim_z, dim_u, r, v, A, B, o, low_rank=low_rank)
def get_bounce_pwa_transition(num_modes, dim_z, dim_u, low_rank=True):
mode_classifier = nn.Linear(dim_z, num_modes)
As = torch.nn.ParameterList()
rs = torch.nn.ParameterList()
vs = torch.nn.ParameterList()
Bs = torch.nn.ParameterList()
os = torch.nn.ParameterList()
for mode in range(num_modes):
As.append(torch.nn.Parameter(2. * (torch.randn(dim_z, dim_z) - .5)))
rs.append(torch.nn.Parameter(2. * (torch.randn(dim_z) - .5)))
vs.append(torch.nn.Parameter(2. * (torch.randn(dim_z) - .5)))
Bs.append(torch.nn.Parameter(2. * (torch.randn(dim_z, dim_u) - .5)))
os.append(torch.nn.Parameter(2. * (torch.randn(dim_z, 1) - .5)))
return PWATransition(dim_z, dim_u, mode_classifier, rs, vs, As, Bs, os, low_rank=low_rank)
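# Illustrative sketch (the dimensions below are assumptions for a bounce-image task, not taken
# from the original code): the builders above are typically combined into one E2C-style model.
#
#   dim_in, dim_z, dim_u = (6, 64, 64), 8, 2
#   encoder = get_bounce_encoder(dim_in, dim_z)
#   decoder = get_bounce_decoder(dim_z, dim_in)
#   transition = get_bounce_pwa_transition(num_modes=3, dim_z=dim_z, dim_u=dim_u)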
|
the-stack_0_7614 | import matplotlib.pyplot as plt
from celluloid import Camera
import sys
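# Usage (inferred from the argument handling below): python <this_script>.py data.txt [out.mp4]
# where each line of data.txt is a bracketed, comma-separated series, e.g. "[0.1, 0.5, 0.3]".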
fig = plt.figure()
camera = Camera(fig)
infile = sys.argv[1]
with open(infile, "r") as f:
for line in f.readlines():
plt.plot([float(i.strip()) for i in line.strip()[1:-1].split(",")], c="b")
camera.snap()
animation = camera.animate()
plt.show()
if len(sys.argv) == 3:
animation.save(sys.argv[2])
print(f"animation saved to {sys.argv[2]}")
|
the-stack_0_7617 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Deferred initialization of tf.Modules (distributions, bijectors, etc.)."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.util import special_methods
class DeferredModule(tf.Module, special_methods.SpecialMethods):
"""Wrapper to defer initialization of a `tf.Module` instance.
`DeferredModule` is a general-purpose mechanism for creating objects that are
'tape safe', meaning that computation occurs only when an instance
method is called, not at construction. This ensures that method calls inside
of a `tf.GradientTape` context will produce gradients to any underlying
`tf.Variable`s.
### Examples
TFP's built-in Distributions and Bijectors are tape-safe by contract, but
this does not extend to cases where computation is required
to construct an object's parameters prior to initialization.
For example, suppose we want to construct a Gamma
distribution with a given mean and variance. In a naive implementation,
we would convert these to the Gamma's native `concentration` and
`rate` parameters when the distribution is constructed. Any future method
calls would produce gradients to `concentration` and `rate`, but not to the
underlying mean and variance:
```python
mean, variance = tf.Variable(3.2), tf.Variable(9.1)
dist = tfd.Gamma(concentration=mean**2 / variance,
rate=mean / variance)
with tf.GradientTape() as tape:
lp = dist.log_prob(5.0)
grads = tape.gradient(lp, [mean, variance])
# ==> `grads` are `[None, None]` !! :-(
```
To preserve the gradients, we can defer the parameter transformation using
`DeferredModule`. The resulting object behaves just like a
`tfd.Gamma` instance, however, instead of running the `Gamma` constructor just
once, it internally applies the parameter transformation and constructs a
new, temporary instance of `tfd.Gamma` on *every method invocation*.
This ensures that all operations needed to compute a method's return value
from any underlying variables are performed every time the method is invoked.
A surrounding `GradientTape` context will therefore be able to trace the full
computation.
```python
def gamma_from_mean_and_variance(mean, variance, **kwargs):
rate = mean / variance
return tfd.Gamma(concentration=mean * rate, rate=rate, **kwargs)
mean, variance = tf.Variable(3.2), tf.Variable(9.1)
deferred_dist = tfp.experimental.util.DeferredModule(
build_fn=gamma_from_mean_and_variance,
mean=mean, # May be passed by position or by name.
variance=variance)
with tf.GradientTape() as tape:
lp = deferred_dist.log_prob(5.0)
grads = tape.gradient(lp, [mean, variance])
# ==> `grads` are defined!
```
Note that we could have achieved a similar effect by using
`tfp.util.DeferredTensor` to individually defer the `concentration` and `rate`
parameters. However, this would have been significantly more verbose, and
would not share any computation between the two parameter transformations.
In general, `DeferredTensor` is often idiomatic for simple transformations of
a single value, while `DeferredModule` may be preferred for transformations
that operate on multiple values and/or contain multiple steps.
### Caveats
Objects derived from a `DeferredModule` are no longer deferred, so
they will not preserve gradients. For example, slicing into a deferred
Distribution yields a new, concrete Distribution instance:
```python
def normal_from_log_scale(scaled_loc, log_scale):
return tfd.Normal(loc=5 * scaled_loc, scale=tf.exp(log_scale))
dist = tfp.experimental.util.DeferredModule(
build_fn=normal_from_log_scale,
scaled_loc=tf.Variable([1., 2., 3.]),
log_scale=tf.Variable([1., 1., 1.]))
dist.batch_shape # ==> [3]
len(dist.trainable_variables) # ==> 2
slice = dist[:2] # Instantiates a new, non-deferred Distribution.
slice.batch_shape # ==> [2]
len(slice.trainable_variables) # ==> 0 (!)
# If needed, we could defer the slice with another layer of wrapping.
deferred_slice = tfp.experimental.util.DeferredModule(
build_fn=lambda d: d[:2],
d=dist)
len(deferred_slice.trainable_variables) # ==> 2
```
"""
def __init__(self, build_fn, *args, also_track=None, **kwargs):
"""Defers initialization of an object with transformed arguments.
Args:
build_fn: Python callable specifying a deferred transformation of the
provided arguments. This must have signature
`module = build_fn(*args, **kwargs)`. The return value `module` is an
instance of `tf.Module`.
*args: Optional positional arguments to `build_fn`.
also_track: Optional instance or structure of instances of `tf.Variable`
and/or `tf.Module`, containing any additional trainable variables that
the `build_fn` may access beyond the given `args` and `kwargs`. This
ensures that such variables will be correctly tracked in
`self.trainable_variables`.
Default value: `None`.
**kwargs: Optional keyword arguments to `build_fn`.
"""
self._build_fn = build_fn
self._param_args = args
self._param_kwargs = kwargs
self._deferred_module_also_track = also_track
# In order for DeferredModule to work as a tf.Module, we need to ensure that
# attrs used by tf.Module are handled directly, rather than being forwarded
# to the inner class.
self._module_attrs = set(dir(tf.Module()))
super(DeferredModule, self).__init__()
def __action__(self, fn, *args, **kwargs):
kwargs.pop('_action_name', None)
return fn(self._build_module(), *args, **kwargs)
def _build_module(self):
return self._build_fn(*self._param_args, **self._param_kwargs)
def __getattr__(self, attr, **kwargs):
if attr in ('_build_fn',
'_param_args',
'_param_kwargs',
'_module_attrs',
'_deferred_module_also_track'):
raise AttributeError()
if attr in self._module_attrs:
raise AttributeError()
return super(DeferredModule, self).__getattr__(attr, **kwargs)
|
the-stack_0_7619 | #
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.config
from libqtile import layout
from libqtile.confreader import Config
from test.conftest import no_xinerama
from test.layouts.layout_utils import assert_focus_path, assert_focused
class ColumnsConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
layout.Columns(num_columns=3),
]
floating_layout = libqtile.resources.default_config.floating_layout
keys = []
mouse = []
screens = []
follow_mouse_focus = False
def columns_config(x):
return no_xinerama(pytest.mark.parametrize("manager", [ColumnsConfig], indirect=True)(x))
# This currently only tests the window focus cycle
@columns_config
def test_columns_window_focus_cycle(manager):
# setup 3 tiled and two floating clients
manager.test_window("one")
manager.test_window("two")
manager.test_window("three")
manager.test_window("float1")
manager.c.window.toggle_floating()
manager.test_window("float2")
manager.c.window.toggle_floating()
manager.test_window("four")
# test preconditions, columns adds clients at pos after current, in two stacks
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['one']
assert columns[1]['clients'] == ['two']
assert columns[2]['clients'] == ['four', 'three']
# last added window has focus
assert_focused(manager, "four")
# assert window focus cycle, according to order in layout
assert_focus_path(manager, 'three', 'float1', 'float2', 'one', 'two', 'four')
@columns_config
def test_columns_swap_column_left(manager):
manager.test_window("1")
manager.test_window("2")
manager.test_window("3")
manager.test_window("4")
# test preconditions
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['1']
assert columns[1]['clients'] == ['2']
assert columns[2]['clients'] == ['4', '3']
assert_focused(manager, "4")
# assert columns are swapped left
manager.c.layout.swap_column_left()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['1']
assert columns[1]['clients'] == ['4', '3']
assert columns[2]['clients'] == ['2']
manager.c.layout.swap_column_left()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['4', '3']
assert columns[1]['clients'] == ['1']
assert columns[2]['clients'] == ['2']
manager.c.layout.swap_column_left()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['2']
assert columns[1]['clients'] == ['1']
assert columns[2]['clients'] == ['4', '3']
@columns_config
def test_columns_swap_column_right(manager):
manager.test_window("1")
manager.test_window("2")
manager.test_window("3")
manager.test_window("4")
# test preconditions
assert manager.c.layout.info()['columns'][0]['clients'] == ['1']
assert manager.c.layout.info()['columns'][1]['clients'] == ['2']
assert manager.c.layout.info()['columns'][2]['clients'] == ['4', '3']
assert_focused(manager, "4")
# assert columns are swapped right
manager.c.layout.swap_column_right()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['4', '3']
assert columns[1]['clients'] == ['2']
assert columns[2]['clients'] == ['1']
manager.c.layout.swap_column_right()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['2']
assert columns[1]['clients'] == ['4', '3']
assert columns[2]['clients'] == ['1']
manager.c.layout.swap_column_right()
columns = manager.c.layout.info()['columns']
assert columns[0]['clients'] == ['2']
assert columns[1]['clients'] == ['1']
assert columns[2]['clients'] == ['4', '3']
|
the-stack_0_7622 | from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
import warnings
import os
import datetime
from tqdm import tqdm
from zipfile import ZipFile, ZIP_DEFLATED
from scipy.ndimage.morphology import distance_transform_edt, binary_fill_holes
from scipy.ndimage.measurements import find_objects
from scipy.optimize import minimize_scalar
from skimage.measure import regionprops
from csbdeep.utils import _raise
from csbdeep.utils.six import Path
from .matching import matching_dataset
def gputools_available():
try:
import gputools
except:
return False
return True
def path_absolute(path_relative):
""" Get absolute path to resource"""
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, path_relative)
def _is_power_of_2(i):
assert i > 0
e = np.log2(i)
return e == int(e)
def _normalize_grid(grid,n):
try:
grid = tuple(grid)
(len(grid) == n and
all(map(np.isscalar,grid)) and
all(map(_is_power_of_2,grid))) or _raise(TypeError())
return tuple(int(g) for g in grid)
except (TypeError, AssertionError):
raise ValueError("grid = {grid} must be a list/tuple of length {n} with values that are power of 2".format(grid=grid, n=n))
def _edt_dist_func(anisotropy):
try:
from edt import edt as edt_func
# raise ImportError()
dist_func = lambda img: edt_func(np.ascontiguousarray(img>0), anisotropy=anisotropy)
except ImportError:
dist_func = lambda img: distance_transform_edt(img, sampling=anisotropy)
return dist_func
def _edt_prob(lbl_img, anisotropy=None):
constant_img = lbl_img.min() == lbl_img.max() and lbl_img.flat[0] > 0
if constant_img:
lbl_img = np.pad(lbl_img, ((1,1),)*lbl_img.ndim, mode='constant')
warnings.warn("EDT of constant label image is ill-defined. (Assuming background around it.)")
dist_func = _edt_dist_func(anisotropy)
prob = np.zeros(lbl_img.shape,np.float32)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
edt = dist_func(mask)[mask]
prob[mask] = edt/(np.max(edt)+1e-10)
if constant_img:
prob = prob[(slice(1,-1),)*lbl_img.ndim].copy()
return prob
def edt_prob(lbl_img, anisotropy=None):
"""Perform EDT on each labeled object and normalize."""
def grow(sl,interior):
return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
def shrink(interior):
return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
constant_img = lbl_img.min() == lbl_img.max() and lbl_img.flat[0] > 0
if constant_img:
lbl_img = np.pad(lbl_img, ((1,1),)*lbl_img.ndim, mode='constant')
warnings.warn("EDT of constant label image is ill-defined. (Assuming background around it.)")
dist_func = _edt_dist_func(anisotropy)
objects = find_objects(lbl_img)
prob = np.zeros(lbl_img.shape,np.float32)
for i,sl in enumerate(objects,1):
# i: object label id, sl: slices of object in lbl_img
if sl is None: continue
interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
# 1. grow object slice by 1 for all interior object bounding boxes
# 2. perform (correct) EDT for object with label id i
# 3. extract EDT for object of original slice and normalize
# 4. store edt for object only for pixels of given label id i
shrink_slice = shrink(interior)
grown_mask = lbl_img[grow(sl,interior)]==i
mask = grown_mask[shrink_slice]
edt = dist_func(grown_mask)[shrink_slice][mask]
prob[sl][mask] = edt/(np.max(edt)+1e-10)
if constant_img:
prob = prob[(slice(1,-1),)*lbl_img.ndim].copy()
return prob
def _fill_label_holes(lbl_img, **kwargs):
lbl_img_filled = np.zeros_like(lbl_img)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
mask_filled = binary_fill_holes(mask,**kwargs)
lbl_img_filled[mask_filled] = l
return lbl_img_filled
def fill_label_holes(lbl_img, **kwargs):
"""Fill small holes in label image."""
# TODO: refactor 'fill_label_holes' and 'edt_prob' to share code
def grow(sl,interior):
return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
def shrink(interior):
return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
objects = find_objects(lbl_img)
lbl_img_filled = np.zeros_like(lbl_img)
for i,sl in enumerate(objects,1):
if sl is None: continue
interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
shrink_slice = shrink(interior)
grown_mask = lbl_img[grow(sl,interior)]==i
mask_filled = binary_fill_holes(grown_mask,**kwargs)[shrink_slice]
lbl_img_filled[sl][mask_filled] = i
return lbl_img_filled
def sample_points(n_samples, mask, prob=None, b=2):
"""sample points to draw some of the associated polygons"""
if b is not None and b > 0:
# ignore image boundary, since predictions may not be reliable
mask_b = np.zeros_like(mask)
mask_b[b:-b,b:-b] = True
else:
mask_b = True
points = np.nonzero(mask & mask_b)
if prob is not None:
# weighted sampling via prob
w = prob[points[0],points[1]].astype(np.float64)
w /= np.sum(w)
ind = np.random.choice(len(points[0]), n_samples, replace=True, p=w)
else:
ind = np.random.choice(len(points[0]), n_samples, replace=True)
points = points[0][ind], points[1][ind]
points = np.stack(points,axis=-1)
return points
def calculate_extents(lbl, func=np.median):
""" Aggregate bounding box sizes of objects in label images. """
if isinstance(lbl,(tuple,list)) or (isinstance(lbl,np.ndarray) and lbl.ndim==4):
return func(np.stack([calculate_extents(_lbl,func) for _lbl in lbl], axis=0), axis=0)
n = lbl.ndim
n in (2,3) or _raise(ValueError("label image should be 2- or 3-dimensional (or pass a list of these)"))
regs = regionprops(lbl)
if len(regs) == 0:
return np.zeros(n)
else:
extents = np.array([np.array(r.bbox[n:])-np.array(r.bbox[:n]) for r in regs])
return func(extents, axis=0)
def polyroi_bytearray(x,y,pos=None,subpixel=True):
""" Byte array of polygon roi with provided x and y coordinates
See https://github.com/imagej/imagej1/blob/master/ij/io/RoiDecoder.java
"""
import struct
def _int16(x):
return int(x).to_bytes(2, byteorder='big', signed=True)
def _uint16(x):
return int(x).to_bytes(2, byteorder='big', signed=False)
def _int32(x):
return int(x).to_bytes(4, byteorder='big', signed=True)
def _float(x):
return struct.pack(">f", x)
subpixel = bool(subpixel)
# add offset since pixel center is at (0.5,0.5) in ImageJ
x_raw = np.asarray(x).ravel() + 0.5
y_raw = np.asarray(y).ravel() + 0.5
x = np.round(x_raw)
y = np.round(y_raw)
assert len(x) == len(y)
top, left, bottom, right = y.min(), x.min(), y.max(), x.max() # bbox
n_coords = len(x)
bytes_header = 64
bytes_total = bytes_header + n_coords*2*2 + subpixel*n_coords*2*4
B = [0] * bytes_total
B[ 0: 4] = map(ord,'Iout') # magic start
B[ 4: 6] = _int16(227) # version
B[ 6: 8] = _int16(0) # roi type (0 = polygon)
B[ 8:10] = _int16(top) # bbox top
B[10:12] = _int16(left) # bbox left
B[12:14] = _int16(bottom) # bbox bottom
B[14:16] = _int16(right) # bbox right
B[16:18] = _uint16(n_coords) # number of coordinates
if subpixel:
B[50:52] = _int16(128) # subpixel resolution (option flag)
if pos is not None:
B[56:60] = _int32(pos) # position (C, Z, or T)
for i,(_x,_y) in enumerate(zip(x,y)):
xs = bytes_header + 2*i
ys = xs + 2*n_coords
B[xs:xs+2] = _int16(_x - left)
B[ys:ys+2] = _int16(_y - top)
if subpixel:
base1 = bytes_header + n_coords*2*2
base2 = base1 + n_coords*4
for i,(_x,_y) in enumerate(zip(x_raw,y_raw)):
xs = base1 + 4*i
ys = base2 + 4*i
B[xs:xs+4] = _float(_x)
B[ys:ys+4] = _float(_y)
return bytearray(B)
def export_imagej_rois(fname, polygons, set_position=True, subpixel=True, compression=ZIP_DEFLATED):
""" polygons assumed to be a list of arrays with shape (id,2,c) """
if isinstance(polygons,np.ndarray):
polygons = (polygons,)
fname = Path(fname)
if fname.suffix == '.zip':
fname = fname.with_suffix('')
with ZipFile(str(fname)+'.zip', mode='w', compression=compression) as roizip:
for pos,polygroup in enumerate(polygons,start=1):
for i,poly in enumerate(polygroup,start=1):
roi = polyroi_bytearray(poly[1],poly[0], pos=(pos if set_position else None), subpixel=subpixel)
roizip.writestr('{pos:03d}_{i:03d}.roi'.format(pos=pos,i=i), roi)
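# Illustrative sketch (variable names are placeholders): export the polygons predicted for two
# images, each an array of shape (n_polygons, 2, n_coords), into a single ImageJ ROI zip.
#
#   export_imagej_rois('rois.zip', [polys_image1, polys_image2])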
def optimize_threshold(Y, Yhat, model, nms_thresh, measure='accuracy', iou_threshs=[0.3,0.5,0.7], bracket=None, tol=1e-2, maxiter=20, verbose=1):
""" Tune prob_thresh for provided (fixed) nms_thresh to maximize matching score (for given measure and averaged over iou_threshs). """
np.isscalar(nms_thresh) or _raise(ValueError("nms_thresh must be a scalar"))
iou_threshs = [iou_threshs] if np.isscalar(iou_threshs) else iou_threshs
values = dict()
if bracket is None:
max_prob = max([np.max(prob) for prob, dist in Yhat])
bracket = max_prob/2, max_prob
# print("bracket =", bracket)
with tqdm(total=maxiter, disable=(verbose!=1), desc="NMS threshold = %g" % nms_thresh) as progress:
def fn(thr):
prob_thresh = np.clip(thr, *bracket)
value = values.get(prob_thresh)
if value is None:
Y_instances = [model._instances_from_prediction(y.shape, *prob_dist, prob_thresh=prob_thresh, nms_thresh=nms_thresh)[0] for y,prob_dist in zip(Y,Yhat)]
stats = matching_dataset(Y, Y_instances, thresh=iou_threshs, show_progress=False, parallel=True)
values[prob_thresh] = value = np.mean([s._asdict()[measure] for s in stats])
if verbose > 1:
print("{now} thresh: {prob_thresh:f} {measure}: {value:f}".format(
now = datetime.datetime.now().strftime('%H:%M:%S'),
prob_thresh = prob_thresh,
measure = measure,
value = value,
), flush=True)
else:
progress.update()
progress.set_postfix_str("{prob_thresh:.3f} -> {value:.3f}".format(prob_thresh=prob_thresh, value=value))
progress.refresh()
return -value
opt = minimize_scalar(fn, method='golden', bracket=bracket, tol=tol, options={'maxiter': maxiter})
verbose > 1 and print('\n',opt, flush=True)
return opt.x, -opt.fun
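if __name__ == "__main__":
    # Minimal self-test sketch, not part of the original module. Because of the relative import
    # above it only runs as `python -m <package>.utils`; the toy label image is made up here.
    _lbl = np.zeros((64, 64), np.uint16)
    _lbl[10:30, 10:30] = 1
    _lbl[40:60, 35:55] = 2
    _lbl = fill_label_holes(_lbl)
    _prob = edt_prob(_lbl)
    print("median object extent:", calculate_extents(_lbl))
    print("prob range:", float(_prob.min()), float(_prob.max()))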
|
the-stack_0_7624 | import pytest
from tools import utils, constants
from launchers.sandbox import Sandbox
from . import protocol
NUM_NODES = 3
@pytest.mark.multinode
@pytest.mark.incremental
class TestDoubleEndorsement:
"""Constructs a double endorsement, and build evidence."""
def test_init(self, sandbox: Sandbox):
for i in range(NUM_NODES):
sandbox.add_node(i, params=constants.NODE_PARAMS)
protocol.activate(sandbox.client(0), activate_in_the_past=True)
utils.bake(sandbox.client(0))
def test_level(self, sandbox: Sandbox):
level = 2
for client in sandbox.all_clients():
assert utils.check_level(client, level)
def test_terminate_nodes_1_and_2(self, sandbox: Sandbox):
sandbox.node(1).terminate()
sandbox.node(2).terminate()
def test_bake_node_0(self, sandbox: Sandbox):
"""Client 0 bakes block A at level 3, not communicated to 1 and 2
Inject an endorsement to ensure a different hash"""
sandbox.client(0).endorse('bootstrap1')
utils.bake(sandbox.client(0))
def test_endorse_node_0(self, sandbox: Sandbox, session: dict):
"""bootstrap1 builds an endorsement for block A"""
client = sandbox.client(0)
client.endorse('bootstrap1')
mempool = client.get_mempool()
endorsement = mempool['applied'][0]
session['endorsement1'] = endorsement
def test_terminate_node_0(self, sandbox: Sandbox):
sandbox.node(0).terminate()
def test_restart_node_2(self, sandbox: Sandbox):
sandbox.node(2).run()
assert sandbox.client(2).check_node_listening()
def test_bake_node_2(self, sandbox: Sandbox):
"""Client 2 bakes block B at level 3, not communicated to 0 and 1"""
utils.bake(sandbox.client(2))
def test_endorse_node_2(self, sandbox: Sandbox, session: dict):
"""bootstrap1 builds an endorsement for block B"""
client = sandbox.client(2)
client.endorse('bootstrap1')
mempool = client.get_mempool()
endorsement = mempool['applied'][0]
session['endorsement2'] = endorsement
sandbox.client(2).endorse('bootstrap2')
def test_restart_all(self, sandbox: Sandbox):
sandbox.node(0).run()
sandbox.node(1).run()
sandbox.client(0).check_node_listening()
sandbox.client(1).check_node_listening()
def test_check_level(self, sandbox: Sandbox):
"""All nodes are at level 3, head is either block A or B"""
level = 3
for client in sandbox.all_clients():
assert utils.check_level(client, level)
def test_forge_accusation(self, sandbox: Sandbox, session: dict):
"""Forge and inject a double endorsement evidence operation"""
client = sandbox.client(1)
head_hash = client.get_head()['hash']
def transform_endorsement(end):
return {
'branch': end['branch'],
'operations': end['contents'][0],
'signature': end['signature'],
}
endorsement1 = transform_endorsement(session['endorsement1'])
endorsement2 = transform_endorsement(session['endorsement2'])
operation = {
'branch': head_hash,
'contents': [
{
'kind': 'double_endorsement_evidence',
'op1': endorsement1,
'op2': endorsement2,
}
],
}
path_forge_operation = (
'/chains/main/blocks/head/helpers/forge/operations'
)
operation_hex_string = client.rpc(
'post', path_forge_operation, data=operation
)
assert isinstance(operation_hex_string, str)
sender_sk_long = constants.IDENTITIES['bootstrap1']['secret']
sender_sk = sender_sk_long[len('unencrypted:') :]
signed_op = utils.sign_operation(operation_hex_string, sender_sk)
op_hash = client.rpc('post', 'injection/operation', signed_op)
assert isinstance(op_hash, str)
session['operation'] = op_hash
def test_operation_applied(self, sandbox: Sandbox, session: dict):
"""Check operation is in mempool"""
client = sandbox.client(1)
assert utils.check_mempool_contains_operations(
client, [session['operation']]
)
|
the-stack_0_7626 | """
pycmark.utils.compat
~~~~~~~~~~~~~~~~~~~~
Utilities for compatibility.
:copyright: Copyright 2017-2019 by Takeshi KOMIYA
:license: Apache License 2.0, see LICENSE for details.
"""
from typing import Any, Generator
from docutils.nodes import Node
if not hasattr(Node, 'findall'): # for docutils-0.17 or older
def findall(self, *args: Any, **kwargs: Any) -> Generator[Node, None, None]:
for node in self.traverse(*args, **kwargs):
yield node
Node.findall = findall # type: ignore
|
the-stack_0_7630 | import math
import timeit
import numpy as np
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
class GA():
def __init__(self, selectionPressure, mutationProbability, chromosomeCount, selectionProbability, problem):
self.selPs = selectionPressure # selection pressure
self.mutPa = mutationProbability # Probabilty of mutation
self.chrCount = chromosomeCount # size of choromosome
self.selPa = selectionProbability # selection probabilty
self.choromosome = [] # choromosome set
self.fitness = [] # fitness set
self.population = [[],[]] # [choromosome, fitness] set
self.generation = 0 # current generation
self.problem = problem # problem route
self.dist_ar = [] # [dots_list, dots_list ]distance array
self.cities_count = 0
self.dots_list = []
self.limit_time = 0
self.start = 0
self.stop = 0
def make_distDataframe(self, problem):
        with open(problem, mode='rt', encoding='utf-8') as reader:
            self.dots_list = reader.read().split("\n")  # ['x1 y1', 'x2 y2', 'x3 y3' ... 'xn yn']
self.cities_count = int(self.dots_list.pop(0))
self.limit_time = float(self.dots_list.pop())
x_list = [] # ['x1', 'x2', 'x3' ... 'xn']
y_list = [] # ['y1', 'y2', 'y3' ... 'yn']
for i in range(self.cities_count):
temp = self.dots_list[i].split(" ")
x_list.append(float(temp[0]))
y_list.append(float(temp[1]))
for n in range(self.cities_count):
temp = []
for m in range(self.cities_count):
temp.append(round((math.sqrt(((x_list[m] - x_list[n]) ** 2) + ((y_list[m] - y_list[n]) ** 2))), 2))
self.dist_ar.append(temp)
self.dist_ar = np.array(self.dist_ar)
print(self.dist_ar)
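    # Expected problem-file layout (inferred from the parsing above), e.g. a 3-city instance:
    #
    #   3
    #   37.5 22.1
    #   10.0 41.3
    #   55.2 18.7
    #   60.0
    #
    # First line: number of cities; then one "x y" pair per city; last line: time limit in seconds.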
    def cal_fit(self, route):
        # Tour length including the closing edge back to the starting city (the route is a cycle).
        fit = 0
        for i in range(len(route)):
            if i == len(route) - 1:
                fit += self.dist_ar[route[i], route[0]]
            else:
                fit += self.dist_ar[route[i], route[i + 1]]
        return fit
def randomTwo(self, ranges):
randomList = []
randomList += random.sample(range(0, ranges), 2)
randomList.sort()
return randomList
def evolution(self) :
self.start = timeit.default_timer()
self.stop = timeit.default_timer()
# init choromosomes
self.make_distDataframe(self.problem)
for i in range(self.chrCount):
self.choromosome.append(random.sample(range(0, self.cities_count), self.cities_count))
for i in range(self.chrCount):
self.fitness.append(round(self.cal_fit(self.choromosome[i]), 5))
self.population = (np.array([self.choromosome, self.fitness])).T
self.population = self.population[np.argsort(self.population[:, 1])]
        print('Initial best solution:\n', self.population[0, 0], "\n", self.population[0, 1])
while self.stop - self.start <= self.limit_time:
offsprings = []
self.generation += 1
            # selection: tournament selection
            for endSel in range(int(self.chrCount*self.selPa)):
                # Pick two random chromosomes from the population and draw a selection random number.
                # If that number is below the selection pressure, the fitter of the two chromosomes is
                # selected; otherwise the worse one is selected.
parents_index = [0, 0]
for i in range(len(parents_index)):
selGeneNum = self.randomTwo(self.chrCount)
match = random.random()
if match < self.selPs:
if self.population[selGeneNum[0], 1] < self.population[selGeneNum[1], 1]:
parents_index[i] = selGeneNum[0]
else:
parents_index[i] = selGeneNum[1]
else:
if self.population[selGeneNum[0], 1] < self.population[selGeneNum[1], 1]:
parents_index[i] = selGeneNum[1]
else:
parents_index[i] = selGeneNum[0]
# crossover : order-based crossover
daddy_value = self.population[parents_index[0], 0].copy()
mommy_value = self.population[parents_index[1], 0].copy()
CsGeneNum = self.randomTwo(self.cities_count)
offspring = daddy_value[CsGeneNum[0]: CsGeneNum[1]]
for i in daddy_value[CsGeneNum[0]: CsGeneNum[1]]:
mommy_value.remove(i)
for i in range(len(offspring)):
mommy_value.insert(CsGeneNum[0] + i, offspring[i])
offspring = mommy_value
offspring_fit = self.cal_fit(offspring)
# mutation : exchange mutation
mut_p = random.random()
if mut_p < self.mutPa:
MtGeneNum = self.randomTwo(self.cities_count)
mut_Temp = offspring[MtGeneNum[0]]
offspring[MtGeneNum[0]] = offspring[MtGeneNum[1]]
offspring[MtGeneNum[1]] = mut_Temp
offspring_fit = self.cal_fit(offspring)
offsprings.append(np.array([offspring, offspring_fit]))
self.population = np.vstack((self.population, offsprings))
# Replacement
self.population = self.population[np.argsort(self.population[:, 1])]
for i in range(int(self.chrCount*self.selPa)) :
self.population = np.delete(self.population, len(self.population)-1, axis=0)
if self.generation % 5000 == 0:
                print('Generation', self.generation, 'best solution:\n', self.population[0, 1])
print(self.population[0, 0])
self.stop = timeit.default_timer()
if __name__ == "__main__":
ga = GA(selectionPressure=0.7, mutationProbability=0.2, chromosomeCount=20, selectionProbability=0.5, problem="cycle51.in")
ga.evolution()
plotData = []
for index in ga.population[0, 0]:
plotData.append([round(float(ga.dots_list[int(index)].split(" ")[0]), 3),
round(float(ga.dots_list[int(index)].split(" ")[1]), 3)])
plotData = np.array(plotData)
plotData = plotData.T
textStr = "fitness :", ga.population[0, 1]
axs = plt.plot(plotData[0], plotData[1])
plt.text(0.05, 0.95, textStr, fontsize=20, fontweight='bold')
plt.show()
    print('Best solution after', ga.generation, 'generations:\n', ga.population[0, 1])
print(ga.population[0, 0])
print(ga.stop - ga.start)
|
the-stack_0_7631 | from twilio.rest import Client
import json
class MessageClient:
def __init__(self):
print('Initializing messaging client')
with open('twiliocredentials.json') as creds:
twiliocred = json.loads(creds.read())
twilio_number = int(twiliocred.get('trial_number'))
twilio_account_sid = twiliocred.get('account_sid')
twilio_auth_token = twiliocred.get('auth_token')
self.twilio_number = twilio_number
self.twilio_client = Client(twilio_account_sid, twilio_auth_token)
print('Twilio client initialized')
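    # Usage sketch (the destination number is a placeholder; twiliocredentials.json must exist):
    #
    #   client = MessageClient()
    #   client.send_message("Appointment reminder for tomorrow at 10am", "+15551234567")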
def send_message(self, body, to):
self.twilio_client.messages.create(
body=body,
to=to,
from_=self.twilio_number,
# media_url=['https://demo.twilio.com/owl.png']
) |
the-stack_0_7632 | import os
for fname in os.listdir('.'):
spl = fname.split('.')
if len(spl) <= 1:
continue
ext = spl[-1]
if ext != 'arc':
continue
print("[Python] Fixing ARC File ", fname)
# ARCTool produces a 0x11 but a 0x07 is expected by the collection
with open(fname, 'r+b') as file:
file.seek(4)
file.write(0x07.to_bytes(1, byteorder='little')) |
the-stack_0_7634 | ######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# CHECKING FLAGGING OF ALL CALIBRATORS
# use rflag mode of flagdata
logprint ("Starting EVLA_pipe_checkflag_semiFinal.py", logfileout='logs/checkflag_semiFinal.log')
time_list=runtiming('checkflag_semiFinal', 'start')
QA2_checkflag_semiFinal='Pass'
logprint ("Checking RFI flagging of all calibrators", logfileout='logs/checkflag_semiFinal.log')
default('flagdata')
vis=ms_active
mode='rflag'
field=calibrator_field_select_string
correlation='ABS_'+corrstring
scan=calibrator_scan_select_string
ntime='scan'
combinescans=False
datacolumn='corrected'
winsize=3
timedevscale=4.0
freqdevscale=4.0
extendflags=False
action='apply'
display=''
flagbackup=False
savepars=True
flagdata()
#clearstat()
# Until we know what the QA criteria are for this script, leave QA2
# set score to "Pass".
logprint ("QA2 score: "+QA2_checkflag_semiFinal, logfileout='logs/checkflag_semiFinal.log')
logprint ("Finished EVLA_pipe_checkflag_semiFinal.py", logfileout='logs/checkflag_semiFinal.log')
time_list=runtiming('checkflag_semiFinal', 'end')
pipeline_save()
######################################################################
|
the-stack_0_7635 | #! python3
import SimpleITK as sitk
import numpy as np
def ApplyBiasCorrection(inputImage, shrinkFactor = (1,1,1)):
# Bias correction filter:
biasFieldCorrFilter = sitk.N4BiasFieldCorrectionImageFilter()
mask = sitk.OtsuThreshold( inputImage, 0, 1, 100)
inputImage = sitk.Cast(inputImage, sitk.sitkFloat32)
# Parameter for the bias corredtion filter:
biasFieldCorrFilter.SetSplineOrder(3)
biasFieldCorrFilter.SetConvergenceThreshold(0.0001)
biasFieldCorrFilter.SetMaximumNumberOfIterations((50, 40, 30))
if shrinkFactor != (1,1,1):
# Shrink image and mask to accelerate:
shrinkedInput = sitk.Shrink(inputImage, shrinkFactor)
mask = sitk.Shrink(mask, shrinkFactor)
#biasFieldCorrFilter.SetNumberOfThreads()
#biasFieldCorrFilter.UseMaskLabelOff() # Because I'm having problems with the mask.
# Run the filter:
output = biasFieldCorrFilter.Execute(shrinkedInput, mask)
# Get the field by dividing the output by the input:
outputArray = sitk.GetArrayFromImage(output)
shrinkedInputArray = sitk.GetArrayFromImage(shrinkedInput)
biasFieldArray = np.ones(np.shape(outputArray), 'float32')
biasFieldArray[shrinkedInputArray != 0] = outputArray[shrinkedInputArray != 0]/shrinkedInputArray[shrinkedInputArray != 0]
biasFieldArray[shrinkedInputArray == 0] = 0
# Generate bias field image:
biasField = sitk.GetImageFromArray(biasFieldArray)
biasField.SetSpacing(shrinkedInput.GetSpacing())
biasField.SetOrigin(shrinkedInput.GetOrigin())
biasField.SetDirection(shrinkedInput.GetDirection())
# Now expand
biasField = sitk.Resample(biasField, inputImage)
# Apply to the image:
output = sitk.Multiply(inputImage, biasField)
else:
#output = biasFieldCorrFilter.Execute(inputImage, mask)
output = biasFieldCorrFilter.Execute(inputImage)
# return the output:
return output
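if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module; the file paths and shrink factor
    # below are assumptions for illustration.
    import sys
    input_path = sys.argv[1] if len(sys.argv) > 1 else "input_mri.nii.gz"
    image = sitk.ReadImage(input_path)
    corrected = ApplyBiasCorrection(image, shrinkFactor=(2, 2, 2))
    sitk.WriteImage(corrected, "bias_corrected.nii.gz")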
|
the-stack_0_7639 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Scriptim (https://github.com/Scriptim)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module runs a `abalone.game.Game`."""
from traceback import format_exc
from typing import Generator, List, Tuple, Union
from abalone.abstract_player import AbstractPlayer
from abalone.enums import Direction, Player, Space
from abalone.game import Game, IllegalMoveException
from abalone.utils import line_from_to
def _get_winner(score: Tuple[int, int]) -> Union[Player, None]:
"""Returns the winner of the game based on the current score.
Args:
score: The score tuple returned by `abalone.game.Game.get_score`
Returns:
Either the `abalone.enums.Player` who won the game or `None` if no one has won yet.
"""
if 8 in score:
return Player.WHITE if score[0] == 8 else Player.BLACK
return None
def _format_move(turn: Player, move: Tuple[Union[Space, Tuple[Space, Space]], Direction], moves: int) -> str:
"""Formats a player's move as a string with a single line.
Args:
turn: The `Player` who performs the move
move: The move as returned by `abalone.abstract_player.AbstractPlayer.turn`
moves: The number of total moves made so far (not including this move)
"""
marbles = [move[0]] if isinstance(move[0], Space) else line_from_to(*move[0])[0]
marbles = map(lambda space: space.name, marbles)
return f'{moves + 1}: {turn.name} moves {", ".join(marbles)} in direction {move[1].name}'
def run_game(black: AbstractPlayer, white: AbstractPlayer, **kwargs) \
-> Generator[Tuple[Game, List[Tuple[Union[Space, Tuple[Space, Space]], Direction]]], None, None]:
"""Runs a game instance and prints the progress / current state at every turn.
Args:
black: An `abalone.abstract_player.AbstractPlayer`
white: An `abalone.abstract_player.AbstractPlayer`
**kwargs: These arguments are passed to `abalone.game.Game.__init__`
Yields:
A tuple of the current `abalone.game.Game` instance and the move history at the start of the game and after\
every legal turn.
"""
game = Game()
moves_history = []
yield game, moves_history
while True:
score = game.get_score()
score_str = f'BLACK {score[0]} - WHITE {score[1]}'
print(score_str, game, '', sep='\n')
winner = _get_winner(score)
if winner is not None:
print(f'{winner.name} won!')
break
try:
move = black.turn(game, moves_history) if game.turn is Player.BLACK else white.turn(game, moves_history)
print(_format_move(game.turn, move, len(moves_history)), end='\n\n')
game.move(*move)
game.switch_player()
moves_history.append(move)
yield game, moves_history
except IllegalMoveException as ex:
print(f'{game.turn.name}\'s tried to perform an illegal move ({ex})\n')
break
except:
print(f'{game.turn.name}\'s move caused an exception\n')
print(format_exc())
break
if __name__ == '__main__': # pragma: no cover
# Run a game from the command line with default configuration.
import importlib
import sys
if len(sys.argv) != 3:
sys.exit(1)
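    # Expected invocation (the player module paths below are hypothetical examples):
    #   python -m abalone.run_game my_players.random_player.RandomPlayer my_players.minimax.MinimaxPlayer
    # Each argument is a dotted "module.Class" path that is resolved via importlib below.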
black = sys.argv[1].rsplit('.', 1)
black = getattr(importlib.import_module(black[0]), black[1])
white = sys.argv[2].rsplit('.', 1)
white = getattr(importlib.import_module(white[0]), white[1])
list(run_game(black(), white()))
|