repo_name (string, 5 to 92 chars) | path (string, 4 to 232 chars) | copies (string, 19 classes) | size (string, 4 to 7 chars) | content (string, 721 to 1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
wolverineav/horizon | openstack_dashboard/dashboards/project/networks/ports/tables.py | 1 | 3608 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import template
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
def get_fixed_ips(port):
template_name = 'project/networks/ports/_port_ips.html'
context = {"ips": port.fixed_ips}
return template.loader.render_to_string(template_name, context)
def get_attached(port):
if port['device_owner']:
return port['device_owner']
elif port['device_id']:
return _('Attached')
else:
return _('Detached')
class UpdatePort(policy.PolicyTargetMixin, tables.LinkAction):
name = "update"
verbose_name = _("Edit Port")
url = "horizon:project:networks:editport"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("network", "update_port"),)
def get_link_url(self, port):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id, port.id))
DISPLAY_CHOICES = (
("UP", pgettext_lazy("Admin state of a Port", u"UP")),
("DOWN", pgettext_lazy("Admin state of a Port", u"DOWN")),
)
STATUS_DISPLAY_CHOICES = (
("ACTIVE", pgettext_lazy("status of a network port", u"Active")),
("DOWN", pgettext_lazy("status of a network port", u"Down")),
("ERROR", pgettext_lazy("status of a network port", u"Error")),
("BUILD", pgettext_lazy("status of a network port", u"Build")),
)
class PortsTable(tables.DataTable):
name = tables.Column("name_or_id",
verbose_name=_("Name"),
link="horizon:project:networks:ports:detail")
fixed_ips = tables.Column(get_fixed_ips, verbose_name=_("Fixed IPs"))
attached = tables.Column(get_attached, verbose_name=_("Attached Device"))
status = tables.Column("status",
verbose_name=_("Status"),
display_choices=STATUS_DISPLAY_CHOICES)
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=DISPLAY_CHOICES)
mac_state = tables.Column("mac_state", empty_value=api.neutron.OFF_STATE,
verbose_name=_("MAC Learning State"))
def get_object_display(self, port):
return port.id
class Meta(object):
name = "ports"
verbose_name = _("Ports")
table_actions = (tables.FilterAction,)
row_actions = (UpdatePort,)
hidden_title = False
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(PortsTable, self).__init__(request, data=data,
needs_form_wrapper=needs_form_wrapper,
**kwargs)
if not api.neutron.is_extension_supported(request, 'mac-learning'):
del self.columns['mac_state']
| apache-2.0 | -4,700,138,844,131,522,000 | 36.195876 | 79 | 0.627494 | false |
ZuluPro/abdallah | abdallah/docker_utils.py | 1 | 1262 | import tempfile
import docker
from django.template import loader, Context
def get_docker_client():
"""
Get configured docker client.
:param params: Settings module with Docker client configuration
:type params: :mod:`abdallah.settings`
:returns: Configured Docker client
:rtype: :class:`docker.Client`
"""
from abdallah import settings
client = docker.Client(base_url=settings.DOCKER['BASE_URL'],
version=settings.DOCKER['VERSION'],
timeout=settings.DOCKER['TIMEOUT'],
tls=settings.DOCKER['TLS'])
return client
def get_job_host_config(job, job_attr):
template = loader.get_template('abdallah/job.sh')
context_dict = job_attr.copy()
context_dict.update({'job': job})
context = Context(context_dict)
init_script_path = tempfile.mktemp('abdallah')
volumes = [init_script_path]
with open(init_script_path, 'w') as init_script:
init_script.write(template.render(context))
# host_config = docker.utils.create_host_config(binds=[
# '/init.sh:%s:ro' % init_script_path,
# ])
host_config = {
init_script_path: {'bind': '/job.sh', 'ro': True}
}
return volumes, host_config
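# Hedged usage sketch (illustration only, not part of the original module):
# one plausible way the (volumes, binds) pair above could be handed to the
# old docker-py container API; the image name and the job/job_attr arguments
# are placeholders.
def _example_run_job(job, job_attr, image='ubuntu:14.04'):
    client = get_docker_client()
    volumes, binds = get_job_host_config(job, job_attr)
    container = client.create_container(image=image, command='bash /job.sh',
                                        volumes=volumes)
    client.start(container, binds=binds)
    return container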
| bsd-3-clause | -8,365,612,540,010,252,000 | 31.358974 | 67 | 0.627575 | false |
kirbyfan64/arclib | docs/source/conf.py | 1 | 9308 | # -*- coding: utf-8 -*-
#
# arclib documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 18 14:25:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'arclib'
copyright = u'2016, Ryan Gonzalez'
author = u'Ryan Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import sphinx_rtd_theme
except ImportError:
html_theme = 'alabaster'
else:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'arclibdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'arclib.tex', u'arclib Documentation',
u'Ryan Gonzalez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'arclib', u'arclib Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'arclib', u'arclib Documentation',
author, 'arclib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -6,549,107,251,327,249,000 | 31.096552 | 79 | 0.706596 | false |
muneebalam/scrapenhl | scrapenhl/scrape_game.py | 1 | 22209 | import scrapenhl_globals
import os.path
def get_url(season, game):
"""
Returns the NHL API url to scrape.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
Returns
--------
str
URL to scrape, http://statsapi.web.nhl.com/api/v1/game/[season]0[game]/feed/live
"""
return 'http://statsapi.web.nhl.com/api/v1/game/{0:d}0{1:d}/feed/live'.format(season, game)
def get_shift_url(season, game):
"""
Returns the NHL API shifts url to scrape.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
Returns
--------
str
http://www.nhl.com/stats/rest/shiftcharts?cayenneExp=gameId=[season]0[game]
"""
return 'http://www.nhl.com/stats/rest/shiftcharts?cayenneExp=gameId={0:d}0{1:d}'.format(season, game)
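# Hedged illustration (not part of the original module): both builders only
# format strings, e.g. for game 20001 of the 2016-17 season:
#   get_url(2016, 20001)       -> 'http://statsapi.web.nhl.com/api/v1/game/2016020001/feed/live'
#   get_shift_url(2016, 20001) -> 'http://www.nhl.com/stats/rest/shiftcharts?cayenneExp=gameId=2016020001'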
def get_json_save_filename(season, game):
"""
Returns the algorithm-determined save file name of the json accessed online.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
Returns
--------
str
file name, SAVE_FOLDER/Season/Game.zlib
"""
return os.path.join(scrapenhl_globals.SAVE_FOLDER, str(season), '{0:d}.zlib'.format(game))
def get_shift_save_filename(season, game):
"""
Returns the algorithm-determined save file name of the shift json accessed online.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
Returns
--------
str
file name, SAVE_FOLDER/Season/Game_shifts.zlib
"""
return os.path.join(scrapenhl_globals.SAVE_FOLDER, str(season), '{0:d}_shifts.zlib'.format(game))
def get_parsed_save_filename(season, game):
"""
Returns the algorithm-determined save file name of the parsed pbp file.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
Returns
--------
str
file name, SAVE_FOLDER/Season/Game_parsed.zlib
"""
return os.path.join(scrapenhl_globals.SAVE_FOLDER, str(season), '{0:d}_parsed.hdf5'.format(game))
def get_parsed_shifts_save_filename(season, game):
"""
Returns the algorithm-determined save file name of the parsed toi file.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
Returns
--------
str
file name, SAVE_FOLDER/Season/Game_shifts_parsed.zlib
"""
return os.path.join(scrapenhl_globals.SAVE_FOLDER, str(season), '{0:d}_shifts_parsed.hdf5'.format(game))
def scrape_game(season, game, force_overwrite = False):
"""
Scrapes and saves game files in compressed (zlib) format
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
force_overwrite : bool
If True, will overwrite previously raw html files. If False, will not scrape if files already found.
Returns
-------
bool
A boolean indicating whether the NHL API was queried.
"""
query = False
import os.path
url = get_url(season, game)
filename = get_json_save_filename(season, game)
if force_overwrite or not os.path.exists(filename):
import urllib.request
try:
query = True
with urllib.request.urlopen(url) as reader:
page = reader.read()
except Exception as e:
if game < 30111:
print('Error reading pbp url for', season, game, e, e.args)
page = bytes('', encoding = 'latin-1')
if True:#game < 30111:
import zlib
page2 = zlib.compress(page, level=9)
w = open(filename, 'wb')
w.write(page2)
w.close()
url = get_shift_url(season, game)
filename = get_shift_save_filename(season, game)
if force_overwrite or not os.path.exists(filename):
import urllib.request
try:
query = True
with urllib.request.urlopen(url) as reader:
page = reader.read()
except Exception as e:
if game < 30111:
print('Error reading shift url for', season, game, e, e.args)
page = bytes('', encoding='latin-1')
if True:#game < 30111:
import zlib
page2 = zlib.compress(page, level=9)
w = open(filename, 'wb')
w.write(page2)
w.close()
return query
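# Hedged usage sketch (illustration only, not part of the original module):
# a typical call chain for one game; the season/game numbers are arbitrary.
def _example_scrape_and_parse(season=2016, game=20001):
    queried = scrape_game(season, game)  # downloads the raw json/shift files if missing
    parse_game(season, game)             # writes the parsed .hdf5 files alongside them
    return queried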
def parse_game(season, game, force_overwrite = False):
"""
Reads this game's zlib file from disk and parses into a friendlier format, then saves again to disk in zlib.
This method also updates the global player id and game log files, and writes any updates to disk.
Parameters
-----------
season : int
The season of the game. 2007-08 would be 2007.
game : int
The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
force_overwrite : bool
If True, will overwrite previously raw html files. If False, will not scrape if files already found.
"""
import os.path
import zlib
import json
import pandas as pd
filename = get_parsed_save_filename(season, game)
if ((force_overwrite or not os.path.exists(filename)) and os.path.exists(get_json_save_filename(season, game))):
r = open(get_json_save_filename(season, game), 'rb')
page = r.read()
r.close()
page = zlib.decompress(page)
try:
data = json.loads(page.decode('latin-1'))
teamdata = data['liveData']['boxscore']['teams']
update_team_ids_from_json(teamdata)
update_player_ids_from_json(teamdata)
update_quick_gamelog_from_json(data)
events = read_events_from_json(data['liveData']['plays']['allPlays'])
if events is not None:
events.to_hdf(filename, key='Game{0:d}0{1:d}'.format(season, game), mode='w',
complevel=9, complib='zlib')
#pbp_compressed = zlib.compress(bytes(events, encoding = 'latin-1'), level=9)
#w = open(filename, 'wb')
#w.write(pbp_compressed)
#w.close()
except json.JSONDecodeError:
pass
filename = get_parsed_shifts_save_filename(season, game)
basic_gamelog = scrapenhl_globals.get_quick_gamelog_file()
if ((force_overwrite or not os.path.exists(filename)) and os.path.exists(get_shift_save_filename(season, game))):
r = open(get_shift_save_filename(season, game), 'rb')
page = r.read()
r.close()
page = zlib.decompress(page)
try:
data = json.loads(page.decode('latin-1'))
try:
thisgamedata = basic_gamelog.query('Season == {0:d} & Game == {1:d}'.format(season, game))
rname = thisgamedata['Away'].iloc[0]
hname = thisgamedata['Home'].iloc[0]
except Exception as e:
hname = None
rname = None
shifts = read_shifts_from_json(data['data'], hname, rname)
if shifts is not None:
#shifts = ''
#shifts_compressed = zlib.compress(shifts, level=9)
#w = open(filename, 'wb')
#w.write(shifts_compressed)
#w.close()
shifts.to_hdf(filename, key = 'Game{0:d}0{1:d}'.format(season, game), mode = 'w',
complevel = 9, complib = 'zlib')
except json.JSONDecodeError:
pass
def read_shifts_from_json(data, homename = None, roadname = None):
if len(data) == 0:
return
ids = ['' for i in range(len(data))]
periods = [0 for i in range(len(data))]
starts = ['0:00' for i in range(len(data))]
ends = ['0:00' for i in range(len(data))]
teams = ['' for i in range(len(data))]
durations = [0 for i in range(len(data))]
for i, dct in enumerate(data):
ids[i] = dct['playerId']
periods[i] = dct['period']
starts[i] = dct['startTime']
ends[i] = dct['endTime']
durations[i] = dct['duration']
teams[i] = dct['teamAbbrev']
### Seems like home players come first
if homename is None:
homename = teams[0]
for i in range(len(teams) - 1, 0, -1):
if not teams[i] == homename:
roadname = teams[i]
break
startmin = [x[:x.index(':')] for x in starts]
startsec = [x[x.index(':') + 1:] for x in starts]
starttimes = [1200 * (p-1) + 60 * int(m) + int(s) for p, m, s in zip(periods, startmin, startsec)]
endmin = [x[:x.index(':')] for x in ends]
endsec = [x[x.index(':') + 1:] for x in ends]
### There is an extra -1 in endtimes to avoid overlapping start/end
endtimes = [1200 * (p - 1) + 60 * int(m) + int(s) - 1 for p, m, s in zip(periods, endmin, endsec)]
durationtime = [e - s for s, e in zip(starttimes, endtimes)]
import pandas as pd
df = pd.DataFrame({'PlayerID': ids, 'Period': periods, 'Start': starttimes, 'End': endtimes,
'Team': teams, 'Duration': durationtime})
df.loc[df.End < df.Start, 'End'] = df.End + 1200
tempdf = df[['PlayerID', 'Start', 'End', 'Team', 'Duration']]
tempdf = tempdf.assign(Time = tempdf.Start)
#print(tempdf.head(20))
toi = pd.DataFrame({'Time': [i for i in range(0, max(df.End) + 1)]})
toidfs = []
while len(tempdf.index) > 0:
temptoi = toi.merge(tempdf, how = 'inner', on = 'Time')
toidfs.append(temptoi)
tempdf = tempdf.assign(Time = tempdf.Time + 1)
tempdf = tempdf.query('Time <= End')
tempdf = pd.concat(toidfs)
tempdf = tempdf.sort_values(by = 'Time')
### Append team name to start of columns by team
hdf = tempdf.query('Team == "' + homename + '"')
hdf2 = hdf.groupby('Time').rank()
hdf2 = hdf2.rename(columns = {'PlayerID': 'rank'})
hdf2.loc[:, 'rank'] = hdf2['rank'].apply(lambda x: int(x))
hdf.loc[:, 'rank'] = homename + hdf2['rank'].astype('str')
rdf = tempdf.query('Team == "' + roadname + '"')
rdf2 = rdf.groupby('Time').rank()
rdf2 = rdf2.rename(columns={'PlayerID': 'rank'})
rdf2.loc[:, 'rank'] = rdf2['rank'].apply(lambda x: int(x))
rdf.loc[:, 'rank'] = roadname + rdf2['rank'].astype('str')
### Occasionally bad entries make duplicates on time and rank. Take one with longer duration
tokeep = hdf.sort_values(by = 'Duration', ascending = False)
tokeep = tokeep.groupby(['Time', 'PlayerID']).first()
tokeep.reset_index(inplace = True)
hdf = hdf.merge(tokeep, how = 'inner', on = ['Time', 'PlayerID', 'Start', 'End', 'Team', 'rank'])
tokeep = rdf.sort_values(by='Duration', ascending=False)
tokeep = tokeep.groupby(['Time', 'PlayerID']).first()
tokeep.reset_index(inplace=True)
rdf = rdf.merge(tokeep, how='inner', on=['Time', 'PlayerID', 'Start', 'End', 'Team', 'rank'])
### Remove values above 6--looking like there won't be many
### TODO: keep goalie if one is a goalie!
hdf = hdf.pivot(index = 'Time', columns = 'rank', values = 'PlayerID').iloc[:, 0:6]
hdf.reset_index(inplace = True) #get time back as a column
rdf = rdf.pivot(index='Time', columns='rank', values='PlayerID').iloc[:, 0:6]
rdf.reset_index(inplace = True)
toi = toi.merge(hdf, how = 'left', on = 'Time').merge(rdf, how = 'left', on = 'Time')
return(toi)
def update_team_ids_from_json(teamdata):
import urllib.request
import json
import pandas as pd
hid = teamdata['home']['team']['id']
team_ids = scrapenhl_globals.get_team_id_file()
if hid not in team_ids.ID.values:
url = 'https://statsapi.web.nhl.com{0:s}'.format(teamdata['home']['team']['link'])
with urllib.request.urlopen(url) as reader:
page = reader.read()
teaminfo = json.loads(page.decode('latin-1'))
hid = teaminfo['teams'][0]['id']
habbrev = teaminfo['teams'][0]['abbreviation']
hname = teaminfo['teams'][0]['name']
df = pd.DataFrame({'ID': [hid], 'Abbreviation': [habbrev], 'Name': [hname]})
team_ids = pd.concat([team_ids, df])
scrapenhl_globals.write_team_id_file(team_ids)
rid = teamdata['away']['team']['id']
if rid not in team_ids.ID.values:
url = 'https://statsapi.web.nhl.com{0:s}'.format(teamdata['away']['team']['link'])
with urllib.request.urlopen(url) as reader:
page = reader.read()
teaminfo = json.loads(page.decode('latin-1'))
rid = teaminfo['teams'][0]['id']
rabbrev = teaminfo['teams'][0]['abbreviation']
rname = teaminfo['teams'][0]['name']
df = pd.DataFrame({'ID': [rid], 'Abbreviation': [rabbrev], 'Name': [rname]})
team_ids = pd.concat([team_ids, df])
scrapenhl_globals.write_team_id_file(team_ids)
def update_player_ids_from_json(teamdata):
"""
Creates a data frame of player data from current game's json[liveData][boxscore] to update player ids.
This method reads player ids, names, handedness, team, position, and number, and full joins to player ids.
If there are any changes to player ids, the dataframe gets written to disk again.
Parameters
-----------
teamdata : dict
A json dict that is the result of api_page['liveData']['boxscore']['teams']
"""
team_ids = scrapenhl_globals.get_team_id_file()
rteam = team_ids.query('ID == ' + str(teamdata['away']['team']['id']))
rabbrev = rteam['Abbreviation'].iloc[0]
hteam = team_ids.query('ID == ' + str(teamdata['home']['team']['id']))
habbrev = hteam['Abbreviation'].iloc[0]
awayplayers = teamdata['away']['players']
homeplayers = teamdata['home']['players']
numplayers = len(awayplayers) + len(homeplayers)
ids = ['' for i in range(numplayers)]
names = ['' for i in range(numplayers)]
teams = ['' for i in range(numplayers)]
positions = ['' for i in range(numplayers)]
nums = [-1 for i in range(numplayers)]
handedness = ['' for i in range(numplayers)]
for i, (pid, pdata) in enumerate(awayplayers.items()):
idnum = pid[2:]
name = pdata['person']['fullName']
try:
hand = pdata['person']['shootsCatches']
except KeyError:
hand = 'N/A'
try:
num = pdata['jerseyNumber']
if num == '':
raise KeyError
else:
num = int(num)
except KeyError:
num = -1
pos = pdata['position']['code']
ids[i] = idnum
names[i] = name
teams[i] = rabbrev
positions[i] = pos
nums[i] = num
handedness[i] = hand
for i, (pid, pdata) in enumerate(homeplayers.items()):
idnum = pid[2:]
name = pdata['person']['fullName']
try:
hand = pdata['person']['shootsCatches']
except KeyError:
hand = 'N/A'
try:
num = pdata['jerseyNumber']
if num == '':
raise KeyError
else:
num = int(num)
except KeyError:
num = -1
pos = pdata['position']['code']
ids[i + len(awayplayers)] = idnum
names[i + len(awayplayers)] = name
teams[i + len(awayplayers)] = habbrev
positions[i + len(awayplayers)] = pos
nums[i + len(awayplayers)] = num
handedness[i + len(awayplayers)] = hand
import pandas as pd
gamedf = pd.DataFrame({'ID': ids,
'Name': names,
'Team': teams,
'Pos': positions,
'#': nums,
'Hand': handedness})
gamedf['Count'] = 1
player_ids = scrapenhl_globals.get_player_id_file()
player_ids = pd.concat([player_ids, gamedf]) \
.groupby(['ID', 'Name', 'Team', 'Pos', '#', 'Hand']).sum().reset_index()
scrapenhl_globals.write_player_id_file(player_ids)
def update_quick_gamelog_from_json(data):
"""
Creates a data frame of basic game data from current game's json to update global BASIC_GAMELOG.
This method reads the season, game, date and time, venue, and team names, coaches, and scores, joining to
BASIC_GAMELOG.
If there are any changes to BASIC_GAMELOG, the dataframe gets written to disk again.
Parameters
-----------
data : dict
The full json dict from the api_page
"""
season = int(str(data['gameData']['game']['pk'])[:4])
game = int(str(data['gameData']['game']['pk'])[4:])
datetime = data['gameData']['datetime']['dateTime']
try:
venue = data['gameData']['venue']['name']
except KeyError:
venue = 'N/A'
team_ids = scrapenhl_globals.get_team_id_file()
hname = team_ids.query('ID == ' + str(data['gameData']['teams']['home']['id']))
hname = hname['Abbreviation'].iloc[0]
rname = team_ids.query('ID == ' + str(data['gameData']['teams']['away']['id']))
rname = rname['Abbreviation'].iloc[0]
try:
hcoach = data['liveData']['boxscore']['teams']['home']['coaches'][0]['person']['fullName']
except IndexError:
hcoach = 'N/A'
try:
rcoach = data['liveData']['boxscore']['teams']['away']['coaches'][0]['person']['fullName']
except IndexError:
rcoach = 'N/A'
hscore = data['liveData']['boxscore']['teams']['home']['teamStats']['teamSkaterStats']['goals']
rscore = data['liveData']['boxscore']['teams']['away']['teamStats']['teamSkaterStats']['goals']
import pandas as pd
gamedf = pd.DataFrame({'Season': [season], 'Game': [game], 'Datetime': [datetime], 'Venue': [venue],
'Home': [hname], 'HomeCoach': [hcoach], 'HomeScore': [hscore],
'Away': [rname], 'AwayCoach': [rcoach], 'AwayScore': [rscore]})
basic_gamelog = scrapenhl_globals.get_quick_gamelog_file()
basic_gamelog = pd.concat([basic_gamelog, gamedf]).drop_duplicates()
scrapenhl_globals.write_quick_gamelog_file(basic_gamelog)
def read_events_from_json(pbp):
"""
Parses the list of plays from the game json into a play-by-play dataframe.
Parameters
-----------
pbp : list
The list of play dicts taken from api_page['liveData']['plays']['allPlays'].
Returns
--------
pandas df
Dataframe of the game's play by play data
"""
import numpy as np
import pandas as pd
index = [i for i in range(len(pbp))]
period = [-1 for i in range(len(pbp))]
time = ['0:00' for i in range(len(pbp))]
event = ['NA' for i in range(len(pbp))]
team = [-1 for i in range(len(pbp))]
p1 = [-1 for i in range(len(pbp))]
p1role = ['' for i in range(len(pbp))]
p2 = [-1 for i in range(len(pbp))]
p2role = ['' for i in range(len(pbp))]
xy = [(np.NaN, np.NaN) for i in range(len(pbp))]
note = ['' for i in range(len(pbp))]
for i in range(len(pbp)):
period[i] = int(pbp[i]['about']['period'])
time[i] = pbp[i]['about']['periodTime']
event[i] = pbp[i]['result']['event']
try:
xy[i] = (float(pbp[i]['coordinates']['x']), float(pbp[i]['coordinates']['y']))
except KeyError:
pass
try:
team[i] = pbp[i]['team']['id']
except KeyError:
pass
try:
p1[i] = pbp[i]['players'][0]['player']['id']
p1role[i] = pbp[i]['players'][0]['playerType']
except KeyError:
pass
try:
p2[i] = pbp[i]['players'][1]['player']['id']
p2role[i] = pbp[i]['players'][1]['playerType']
except KeyError:
pass
except IndexError: #e.g. on a give or take
pass
try:
note[i] = pbp[i]['result']['description']
except KeyError:
pass
#print(period[i], time[i], event[i], xy[i], team[i], p1[i], p1role[i], p2[i], p2role[i])
pbpdf = pd.DataFrame({'Index': index, 'Period': period, 'Time': time, 'Event': event,
'Team': team, 'Actor': p1, 'ActorRole': p1role, 'Recipient': p2, 'RecipientRole': p2role,
'XY': xy, 'Note': note})
return pbpdf
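# Hedged usage sketch (illustration only, not part of the original module):
# reading a previously parsed play-by-play table back from disk. The key
# mirrors the one written by parse_game() above.
def _example_load_parsed_pbp(season, game):
    import pandas as pd
    return pd.read_hdf(get_parsed_save_filename(season, game),
                       key='Game{0:d}0{1:d}'.format(season, game))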
| mit | 3,533,887,664,172,274,000 | 36.706282 | 117 | 0.578639 | false |
b3j0f/middleware | setup.py | 1 | 3807 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014 Jonathan Labéjof <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""b3j0f.middleware building script."""
from setuptools import setup, find_packages
from os.path import abspath, dirname, join
from re import compile as re_compile, S as re_S
NAME = 'b3j0f.middleware' # library name
NAMEPATH = NAME.replace('.', '/')
BASEPATH = dirname(abspath(__file__))
# get long description from setup directory abspath
with open(join(BASEPATH, 'README.rst')) as f:
DESC = f.read()
# Get the version - do not use normal import because it does break coverage
# thanks to the python jira project
# (https://github.com/pycontribs/jira/blob/master/setup.py)
with open(join(BASEPATH, NAMEPATH, 'version.py')) as f:
_STREAM = f.read()
_REGEX = r'.*__version__ = \'(.*?)\''
VERSION = re_compile(_REGEX, re_S).match(_STREAM).group(1)
KEYWORDS = [
'utils', 'middleware', 'API', 'tools', 'dynamic', 'reflection', 'reflect',
'runtime', 'abstract', 'common'
]
DEPENDENCIES = []
with open(join(BASEPATH, 'requirements.txt')) as f:
DEPENDENCIES = list(line for line in f.readlines())
DESCRIPTION = 'Middleware utilities library'
URL = 'https://github.com/{0}'.format(NAMEPATH)
setup(
name=NAME,
version=VERSION,
packages=find_packages(exclude=['test.*', '*.test.*']),
author='b3j0f',
author_email='[email protected]',
install_requires=DEPENDENCIES,
description=DESCRIPTION,
long_description=DESC,
include_package_data=True,
url=URL,
license='MIT License',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Natural Language :: French',
'Operating System :: OS Independent',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
test_suite='b3j0f',
keywords=KEYWORDS
)
| mit | -2,162,318,671,613,996,300 | 36.303922 | 79 | 0.6523 | false |
openstack/zaqar | zaqar/tests/unit/transport/wsgi/v1/test_home.py | 1 | 2242 | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import falcon
from oslo_serialization import jsonutils
import six.moves.urllib.parse as urlparse
from zaqar.tests.unit.transport.wsgi import base
class TestHomeDocument(base.V1Base):
config_file = 'wsgi_mongodb.conf'
def test_json_response(self):
body = self.simulate_get(self.url_prefix + '/')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
content_type = self.srmock.headers_dict['Content-Type']
self.assertEqual('application/json-home', content_type)
try:
jsonutils.loads(body[0])
except ValueError:
self.fail('Home document is not valid JSON')
def test_href_template(self):
body = self.simulate_get(self.url_prefix + '/')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
resp = jsonutils.loads(body[0])
queue_href_template = resp['resources']['rel/queue']['href-template']
path_1 = 'https://zaqar.example.com' + self.url_prefix
path_2 = 'https://zaqar.example.com' + self.url_prefix + '/'
# Verify all the href template start with the correct version prefix
for resource in list(resp['resources']):
self.assertTrue(resp['resources'][resource]['href-template'].
startswith(self.url_prefix))
url = urlparse.urljoin(path_1, queue_href_template)
expected = ('https://zaqar.example.com' + self.url_prefix +
'/queues/foo')
self.assertEqual(expected, url.format(queue_name='foo'))
url = urlparse.urljoin(path_2, queue_href_template)
self.assertEqual(expected, url.format(queue_name='foo'))
| apache-2.0 | 6,833,967,138,894,138,000 | 38.333333 | 79 | 0.670384 | false |
mozilla/firefox-flicks | flicks/base/tests/test_helpers.py | 1 | 1697 | from datetime import datetime
from mock import patch
from nose.tools import eq_
from flicks.base.helpers import babel_date, country_name
from flicks.base.tests import TestCase
class TestHelpers(TestCase):
def test_babel_date(self):
date = datetime(2011, 9, 23)
with self.activate('en-US'):
eq_(babel_date(date, 'short'), '9/23/11')
eq_(babel_date(date, 'medium'), 'Sep 23, 2011')
with self.activate('fr'):
eq_(babel_date(date, 'short'), '23/09/11')
eq_(babel_date(date, 'medium'), '23 sept. 2011')
@patch('flicks.base.helpers.product_details')
def test_country_name(self, product_details):
product_details.get_regions.side_effect = lambda l: {'au': 'test'}
with self.activate('fr'):
name = country_name('au')
eq_(name, 'test')
product_details.get_regions.assert_called_with('fr')
@patch('flicks.base.helpers.product_details')
def test_country_name_es(self, product_details):
"""
When `es` is passed as the locale, country_name should use `es-ES` as
the locale for product_details.
"""
product_details.get_regions.side_effect = lambda l: {'fr': 'test'}
with self.activate('es'):
name = country_name('fr')
eq_(name, 'test')
product_details.get_regions.assert_called_with('es-ES')
@patch('flicks.base.helpers.product_details')
def test_country_name_empty(self, product_details):
"""If the given country code can't be found, return an empty string."""
product_details.get_regions.side_effect = lambda l: {'fr': 'test'}
eq_(country_name('au'), '')
| bsd-3-clause | -276,973,227,105,677,020 | 35.106383 | 79 | 0.614025 | false |
Incubaid/pyrakoon | pyrakoon/client/admin.py | 1 | 1551 | # This file is part of Pyrakoon, a distributed key-value store client.
#
# Copyright (C) 2013, 2014 Incubaid BVBA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Administrative client interface'''
from pyrakoon.client import utils
from pyrakoon.protocol import admin
class ClientMixin: #pylint: disable=W0232,C1001
'''Mixin providing client actions for node administration
This can be mixed into any class implementing
:class:`pyrakoon.client.AbstractClient`.
'''
#pylint: disable=C0111,R0201
@utils.call(admin.OptimizeDB) #pylint: disable=E1101
def optimize_db(self):
assert False
@utils.call(admin.DefragDB) #pylint: disable=E1101
def defrag_db(self):
assert False
@utils.call(admin.DropMaster) #pylint: disable=E1101
def drop_master(self):
assert False
@utils.call(admin.CollapseTlogs) #pylint: disable=E1101
def collapse_tlogs(self):
assert False
@utils.call(admin.FlushStore) #pylint: disable=E1101
def flush_store(self):
assert False
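# Hedged usage sketch (illustration only, not part of the original module):
# the mixin is combined with a concrete AbstractClient implementation;
# `SocketClient` below is a placeholder name for whichever one is in use.
#
#   class AdminClient(ClientMixin, SocketClient):
#       pass
#
#   AdminClient(...).optimize_db()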
| apache-2.0 | -7,529,183,197,661,053,000 | 31.3125 | 74 | 0.724049 | false |
KeyWeeUsr/plyer | plyer/platforms/android/proximity.py | 1 | 2058 | from jnius import autoclass
from jnius import cast
from jnius import java_method
from jnius import PythonJavaClass
from plyer.platforms.android import activity
from plyer.facades import Proximity
ActivityInfo = autoclass('android.content.pm.ActivityInfo')
Context = autoclass('android.content.Context')
Sensor = autoclass('android.hardware.Sensor')
SensorManager = autoclass('android.hardware.SensorManager')
class ProximitySensorListener(PythonJavaClass):
__javainterfaces__ = ['android/hardware/SensorEventListener']
def __init__(self):
super(ProximitySensorListener, self).__init__()
service = activity.getSystemService(Context.SENSOR_SERVICE)
self.SensorManager = cast('android.hardware.SensorManager', service)
self.sensor = self.SensorManager.getDefaultSensor(
Sensor.TYPE_PROXIMITY)
self.value = None
def enable(self):
self.SensorManager.registerListener(
self, self.sensor,
SensorManager.SENSOR_DELAY_NORMAL
)
def disable(self):
self.SensorManager.unregisterListener(self, self.sensor)
@java_method('(Landroid/hardware/SensorEvent;)V')
def onSensorChanged(self, event):
self.value = event.values[0]
@java_method('(Landroid/hardware/Sensor;I)V')
def onAccuracyChanged(self, sensor, accuracy):
pass
class AndroidProximity(Proximity):
listener = None
def _enable(self, **kwargs):
if not self.listener:
self.listener = ProximitySensorListener()
self.listener.enable()
def _disable(self, **kwargs):
if self.listener:
self.listener.disable()
delattr(self, 'listener')
def _get_proximity(self):
if self.listener:
value = self.listener.value
# value is 0.0 when proxime sensor is covered. In other case
# value is 5.0 because in smartphone, optical proximity sensors
# are used.
return value < 5.0
def instance():
return AndroidProximity()
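# Hedged usage sketch (illustration only, not part of the original module):
# on an Android device the facade is normally reached through plyer's public
# proxy rather than by importing this platform module directly.
#
#   from plyer import proximity
#   proximity.enable()
#   near = proximity.proximity   # True while the sensor is covered
#   proximity.disable()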
| mit | 839,760,729,264,712,200 | 28.826087 | 76 | 0.672012 | false |
EvilCult/moviecatcher | View/PlayerView.py | 1 | 6852 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter
import urllib.request, urllib.error, urllib.parse
import ssl
import io
import PIL.Image
import PIL.ImageTk
import tkinter.messagebox
import time
import webbrowser
from selenium import webdriver
from Lib import Tools
class GUI :
def __init__ (self, master) :
self.master = master
self.authDownload = ''
self.watchLinkStat = {'err': 0, 'msg': ''}
self.downLinkStat = {'err': 0, 'msg': ''}
self.Tools = Tools.Tools()
def showDlLink (self, link) :
window = tkinter.Toplevel()
window.title('下载链接')
window.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
window.iconbitmap(self.Tools.getRes('biticon.ico'))
topZone = tkinter.Frame(window, bd = 0, bg="#444")
topZone.pack(expand = True, fill = 'both')
textZone = tkinter.Text(topZone, height = 8, width = 50, bd = 10, bg="#444", fg = '#ddd', highlightthickness = 0, selectbackground = '#116cd6')
textZone.grid(row = 0, column = 0, sticky = '')
textZone.insert('insert', link)
dlBtn = tkinter.Button(topZone, text = '下载', width = 10, fg = '#222', highlightbackground = '#444', command = lambda url = link : webbrowser.open_new(url))
dlBtn.grid(row = 1, column = 0, pady = 5)
def showWatchLink (self) :
if self.watchLinkStat['err'] == 0 :
if self.watchLinkStat['msg'] == '' :
self.timer = self.master.after(50, self.showWatchLink)
else :
webbrowser.open_new(self.watchLinkStat['msg'])
elif self.watchLinkStat['err'] == 1 :
tkinter.messagebox.showinfo('Error', '云端未能完成该任务,请等待云端下载完成or换个资源试试!')
elif self.watchLinkStat['err'] == 2 :
tkinter.messagebox.showinfo('Notice', '磁力链接目前不支持在线观看,待后续版本更新。\r\n暂时请手动下载或上传链接至百度云!')
elif self.watchLinkStat['err'] == 3 :
self.showAuthCode(self.watchLinkStat['msg'])
def showCloudLink (self) :
if self.downLinkStat['err'] == 0 :
if self.downLinkStat['msg'] == '' :
self.timer = self.master.after(50, self.showCloudLink)
else :
window = tkinter.Toplevel()
window.title('离线下载链接')
window.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
window.iconbitmap(self.Tools.getRes('biticon.ico'))
topZone = tkinter.Frame(window, bd = 0, bg="#444")
topZone.pack(expand = True, fill = 'both')
textZone = tkinter.Text(topZone, height = 8, width = 50, bd = 10, bg="#444", fg = '#ddd', highlightthickness = 0, selectbackground = '#116cd6')
textZone.grid(row = 0, column = 0, sticky = '')
textZone.insert('insert', self.downLinkStat['msg'])
dlBtn = tkinter.Button(topZone, text = '下载', width = 10, fg = '#222', highlightbackground = '#444', command = lambda url = self.downLinkStat['msg'] : webbrowser.open_new(url))
dlBtn.grid(row = 1, column = 0, pady = 5)
elif self.downLinkStat['err'] == 1 :
tkinter.messagebox.showinfo('Error', '云端未能完成该任务,请等待云端下载完成or换个资源试试!')
elif self.downLinkStat['err'] == 2 :
tkinter.messagebox.showinfo('Notice', '磁力链接目前不支持离线下载,待后续版本更新。\r\n暂时请手动下载或上传链接至百度云!')
elif self.downLinkStat['err'] == 3 :
self.showAuthCode(self.downLinkStat['msg'])
def showAuthCode (self, imgUrl) :
self.authWindow = tkinter.Toplevel()
self.authWindow.title('验证码')
self.authWindow.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
self.authWindow.iconbitmap(self.Tools.getRes('biticon.ico'))
self.authWindow.config(background='#444')
winTop = tkinter.Frame(self.authWindow, bd = 10, bg = '#444')
winTop.grid(row = 0, column = 0, sticky = '')
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
image = urllib.request.urlopen(imgUrl, context = ctx).read()
imgData = io.BytesIO(image)
pilImage = PIL.Image.open(imgData)
tkImg = PIL.ImageTk.PhotoImage(pilImage)
label = tkinter.Label(winTop, image = tkImg, bd = 0, bg = '#111', relief = 'solid')
label.img = tkImg
label.grid(row = 0, column = 0, sticky = '', pady = 5)
self.authKeyInput = tkinter.Entry(winTop, width = 20, bd = 0, bg = "#222", fg = "#ddd", highlightthickness = 1, highlightcolor="#111", highlightbackground = '#111', justify='center')
self.authKeyInput.grid(row = 1, column = 0, pady = 5)
self.authKeyInput.insert('end', '')
btn = tkinter.Button(winTop, text = '确认', width = 10, fg = '#222', highlightbackground = '#444', command = self.__getAuthInput)
btn.grid(row = 2, column = 0, pady = 5)
def showLoginWindow (self, callback = '') :
loginUrl = 'https://pan.baidu.com/'
if self.Tools.isWin() :
chromeDriver = self.Tools.getRes('chromedriver.exe')
else :
chromeDriver = self.Tools.getRes('chromedriver')
# try:
self.browser = webdriver.Chrome(executable_path = chromeDriver)
self.browser.get(loginUrl)
self.browser.maximize_window()
self.slave = tkinter.Toplevel()
self.slave.title('Login')
self.slave.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
self.slave.iconbitmap(self.Tools.getRes('biticon.ico'))
mainFrame = tkinter.Frame(self.slave, bd = 0, bg="#444")
mainFrame.pack(expand = True, fill = 'both', ipadx = '10')
msgLabel = tkinter.Label(mainFrame, text="请于页面中登陆百度云账号\r\n登陆成功后点击下方「获取cookies」按钮", fg = '#ddd', bg="#444", anchor = 'center')
msgLabel.grid(row = 0, column = 1, pady = 5)
loginBtn = tkinter.Button(mainFrame, text = '获取cookies', width = 20, fg = '#222', highlightbackground = '#444', command = lambda cb = callback : self.__getLoginInput(cb))
loginBtn.grid(row = 4, column = 1, pady = 5)
mainFrame.grid_columnconfigure(0, weight=1)
mainFrame.grid_columnconfigure(2, weight=1)
# except Exception as e:
# tkMessageBox.showinfo('Notice', '为保障密码安全:登陆功能将完全在Chrome浏览器中进行。\r\n所以需要Chrome支持。\r\n请先安装Google Chrome浏览器。')
def __getLoginInput (self, callback = '') :
time.sleep(5)
if self.browser.title == '百度网盘-全部文件' :
cookies = self.browser.get_cookies()
cookieStr = ''
for x in cookies :
cookieStr += x['name'] + '=' + x['value'] + '; '
result = {'stat': 1, 'msg': '获取成功'}
else :
result = {'stat': 2, 'msg': '获取失败'}
self.browser.quit()
if result['stat'] == 1 :
self.slave.destroy()
tkinter.messagebox.showinfo('Success', '登陆成功')
callback(cookieStr)
else :
tkinter.messagebox.showinfo('Error', result['msg'])
def __getAuthInput (self) :
authKey = self.authKeyInput.get()
self.authDownload(authKey)
self.authWindow.destroy()
| mit | -659,800,920,965,595,600 | 36.093023 | 184 | 0.666771 | false |
miti0/mosquito | utils/postman.py | 1 | 2056 | import smtplib
import configargparse
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from premailer import transform
class Postman:
"""
Simple email/postman module
! Currently supported only for gmail
"""
arg_parser = configargparse.get_argument_parser()
arg_parser.add('--mail_username', help='Email username (supported only gmail)')
arg_parser.add("--mail_password", help='Email password (supported only gmail)')
arg_parser.add("--mail_recipients", help='Email recipients')
def __init__(self):
self.args = self.arg_parser.parse_known_args()[0]
self.username = self.args.mail_username
self.password = self.args.mail_password
self.recipients = self.args.mail_recipients
def send_mail(self, subject, body):
"""
Send email to configured account with given subject and body
"""
mail_from = self.username
# mail_to = self.recipients if type(self.recipients) is list else [self.recipients]
mail_to = self.recipients
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = mail_from
msg['To'] = mail_to
# body = self.html_style() + body
# msg.attach(MIMEText(body, 'html'))
body = transform(body)
#body = '<html> <h1 style="font-weight:bolder; border:1px solid black">Peter</h1> <p style="color:red">Hej</p> </html>'
msg.attach(MIMEText(body, 'html'))
mail = smtplib.SMTP("smtp.gmail.com", 587)
mail.ehlo()
mail.starttls()
mail.login(self.username, self.password)
mail.sendmail(mail_from, mail_to, msg.as_string())
mail.close()
print('mail successfully sent')
@staticmethod
def html_style():
"""
Email css styles
"""
style = '''
<style>
#headings {
font-size:26px !important;
line-height:32px !important;
}
</style>
'''
return style
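# Hedged usage sketch (illustration only, not part of the original module):
# the username, password and recipients come from configargparse, so they
# must be supplied via config/CLI before the class is instantiated.
#
#   postman = Postman()
#   postman.send_mail('Mosquito alert', '<h1>BTC position opened</h1>')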
| gpl-3.0 | 7,558,057,738,645,854,000 | 31.634921 | 127 | 0.601654 | false |
weidenba/recovery_sort | helper/meta.py | 1 | 1540 | from common_helper_files import get_binary_from_file
from hashlib import sha256
import os
import time
import logging
import magic
import sys
def generate_uid(file_path):
file_data = get_binary_from_file(file_path)
if file_data == b'' or type(file_data) is not bytes:
return "0_0"
file_hash = sha256(file_data).hexdigest()
file_size = get_file_size(file_path)
return "{}_{}".format(file_hash, file_size)
def get_modification_date(file_path):
'''
Return a string of the modification date: YYYY-MM-DD
'''
try:
mod_date = os.path.getmtime(file_path)
mod_date = time.localtime(mod_date)
return time.strftime('%Y-%m-%d', mod_date)
except Exception as e:
logging.error('Could not get timestamp: {} {}'.format(sys.exc_info()[0].__name__, e))
return '0'
def get_file_size(file_path):
'''
Returns size of a file in bytes
'''
try:
return os.path.getsize(file_path)
except Exception as e:
logging.error('Could not get file size: {} {}'.format(sys.exc_info()[0].__name__, e))
return 0
def get_file_name(file_path):
'''
    Returns the file name
'''
file_name = file_path.split('/')[-1:][0]
return file_name
def get_file_mime(file_path):
'''
Returns the mime_type of a file
'''
try:
return magic.from_file(file_path, mime=True)
except Exception as e:
logging.error('Could not get file type: {} {}'.format(sys.exc_info()[0].__name__, e))
return 'unknown'
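# Hedged usage sketch (illustration only, not part of the original module):
# gathering all the metadata fields this module extracts for a single path.
def _example_collect_meta(file_path):
    return {'uid': generate_uid(file_path),
            'name': get_file_name(file_path),
            'size': get_file_size(file_path),
            'modified': get_modification_date(file_path),
            'mime': get_file_mime(file_path)}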
| gpl-3.0 | 581,053,022,200,500,400 | 25.101695 | 93 | 0.611688 | false |
geosolutions-it/geonode | geonode/security/models.py | 1 | 19572 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
import traceback
import operator
from functools import reduce
from django.db.models import Q
from django.conf import settings
from django.db import transaction
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from geonode.groups.conf import settings as groups_settings
from guardian.shortcuts import (
assign_perm,
get_anonymous_user,
get_groups_with_perms,
get_perms
)
from geonode.groups.models import GroupProfile
from .permissions import (
ADMIN_PERMISSIONS,
LAYER_ADMIN_PERMISSIONS,
VIEW_PERMISSIONS,
)
from .utils import (
get_users_with_perms,
set_owner_permissions,
remove_object_permissions,
purge_geofence_layer_rules,
sync_geofence_with_guardian,
get_user_obj_perms_model
)
logger = logging.getLogger("geonode.security.models")
class PermissionLevelError(Exception):
pass
class PermissionLevelMixin(object):
"""
Mixin for adding "Permission Level" methods
to a model class -- eg role systems where a
user has exactly one assigned role with respect to
an object representing an "access level"
"""
def get_all_level_info(self):
resource = self.get_self_resource()
users = get_users_with_perms(resource)
groups = get_groups_with_perms(
resource,
attach_perms=True)
if groups:
for group in groups:
try:
group_profile = GroupProfile.objects.get(slug=group.name)
managers = group_profile.get_managers()
if managers:
for manager in managers:
if manager not in users and not manager.is_superuser and \
manager != resource.owner:
for perm in ADMIN_PERMISSIONS + VIEW_PERMISSIONS:
assign_perm(perm, manager, resource)
users[manager] = ADMIN_PERMISSIONS + VIEW_PERMISSIONS
except GroupProfile.DoesNotExist:
tb = traceback.format_exc()
logger.debug(tb)
if resource.group:
try:
group_profile = GroupProfile.objects.get(slug=resource.group.name)
managers = group_profile.get_managers()
if managers:
for manager in managers:
if manager not in users and not manager.is_superuser and \
manager != resource.owner:
for perm in ADMIN_PERMISSIONS + VIEW_PERMISSIONS:
assign_perm(perm, manager, resource)
users[manager] = ADMIN_PERMISSIONS + VIEW_PERMISSIONS
except GroupProfile.DoesNotExist:
tb = traceback.format_exc()
logger.debug(tb)
info = {
'users': users,
'groups': groups}
try:
if hasattr(self, "layer"):
info_layer = {
'users': get_users_with_perms(
self.layer),
'groups': get_groups_with_perms(
self.layer,
attach_perms=True)}
for user in info_layer['users']:
if user in info['users']:
info['users'][user] = info['users'][user] + info_layer['users'][user]
else:
info['users'][user] = info_layer['users'][user]
for group in info_layer['groups']:
if group in info['groups']:
info['groups'][group] = list(dict.fromkeys(info['groups'][group] + info_layer['groups'][group]))
else:
info['groups'][group] = info_layer['groups'][group]
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
return info
def get_self_resource(self):
try:
if hasattr(self, "resourcebase_ptr_id"):
return self.resourcebase_ptr
except ObjectDoesNotExist:
pass
return self
@transaction.atomic
def set_default_permissions(self, owner=None):
"""
Remove all the permissions except for the owner and assign the
view permission to the anonymous group
"""
remove_object_permissions(self)
# default permissions for anonymous users
def skip_registered_members_common_group(user_group):
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
_members_group_name = groups_settings.REGISTERED_MEMBERS_GROUP_NAME
if (settings.RESOURCE_PUBLISHING or settings.ADMIN_MODERATE_UPLOADS) and \
_members_group_name == user_group.name:
return True
return False
anonymous_group, created = Group.objects.get_or_create(name='anonymous')
# default permissions for owner
_owner = owner or self.owner
user_groups = Group.objects.filter(
name__in=_owner.groupmember_set.all().values_list("group__slug", flat=True))
obj_group_managers = []
if user_groups:
for _user_group in user_groups:
if not skip_registered_members_common_group(Group.objects.get(name=_user_group)):
try:
_group_profile = GroupProfile.objects.get(slug=_user_group)
managers = _group_profile.get_managers()
if managers:
for manager in managers:
if manager not in obj_group_managers and not manager.is_superuser:
obj_group_managers.append(manager)
except GroupProfile.DoesNotExist:
tb = traceback.format_exc()
logger.debug(tb)
if not anonymous_group:
raise Exception("Could not acquire 'anonymous' Group.")
# default permissions for resource owner
set_owner_permissions(self, members=obj_group_managers)
# Anonymous
anonymous_can_view = settings.DEFAULT_ANONYMOUS_VIEW_PERMISSION
if anonymous_can_view:
assign_perm('view_resourcebase',
anonymous_group, self.get_self_resource())
else:
for user_group in user_groups:
if not skip_registered_members_common_group(user_group):
assign_perm('view_resourcebase',
user_group, self.get_self_resource())
anonymous_can_download = settings.DEFAULT_ANONYMOUS_DOWNLOAD_PERMISSION
if anonymous_can_download:
assign_perm('download_resourcebase',
anonymous_group, self.get_self_resource())
else:
for user_group in user_groups:
if not skip_registered_members_common_group(user_group):
assign_perm('download_resourcebase',
user_group, self.get_self_resource())
if self.__class__.__name__ == 'Layer':
# only for layer owner
assign_perm('change_layer_data', _owner, self)
assign_perm('change_layer_style', _owner, self)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
purge_geofence_layer_rules(self.get_self_resource())
# Owner & Managers
perms = [
"view_resourcebase",
"change_layer_data",
"change_layer_style",
"change_resourcebase",
"change_resourcebase_permissions",
"download_resourcebase"]
sync_geofence_with_guardian(self.layer, perms, user=_owner)
for _group_manager in obj_group_managers:
sync_geofence_with_guardian(self.layer, perms, user=_group_manager)
for user_group in user_groups:
if not skip_registered_members_common_group(user_group):
sync_geofence_with_guardian(self.layer, perms, group=user_group)
# Anonymous
perms = ["view_resourcebase"]
if anonymous_can_view:
sync_geofence_with_guardian(self.layer, perms, user=None, group=None)
perms = ["download_resourcebase"]
if anonymous_can_download:
sync_geofence_with_guardian(self.layer, perms, user=None, group=None)
@transaction.atomic
def set_permissions(self, perm_spec, created=False):
"""
        Sets an object's permission levels based on the perm_spec JSON.
        The mapping looks like:
        {
            'users': {
                'AnonymousUser': ['view'],
                <username>: ['perm1','perm2','perm3'],
                <username2>: ['perm1','perm2','perm3'],
                ...
            },
            'groups': {
                <groupname>: ['perm1','perm2','perm3'],
                <groupname2>: ['perm1','perm2','perm3'],
                ...
            }
        }
"""
remove_object_permissions(self)
# default permissions for resource owner
set_owner_permissions(self)
# Anonymous User group
if 'users' in perm_spec and "AnonymousUser" in perm_spec['users']:
anonymous_group = Group.objects.get(name='anonymous')
for perm in perm_spec['users']['AnonymousUser']:
if self.polymorphic_ctype.name == 'layer' and perm in ('change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, anonymous_group, self.layer)
else:
assign_perm(perm, anonymous_group, self.get_self_resource())
# Owner
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
if not created:
purge_geofence_layer_rules(self.get_self_resource())
perms = [
"view_resourcebase",
"change_layer_data",
"change_layer_style",
"change_resourcebase",
"change_resourcebase_permissions",
"download_resourcebase"]
sync_geofence_with_guardian(self.layer, perms, user=self.owner)
# All the other users
if 'users' in perm_spec and len(perm_spec['users']) > 0:
for user, perms in perm_spec['users'].items():
_user = get_user_model().objects.get(username=user)
if _user != self.owner and user != "AnonymousUser":
for perm in perms:
if self.polymorphic_ctype.name == 'layer' and perm in (
'change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, _user, self.layer)
else:
assign_perm(perm, _user, self.get_self_resource())
# Set the GeoFence Rules
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
group_perms = None
if 'groups' in perm_spec and len(perm_spec['groups']) > 0:
group_perms = perm_spec['groups']
sync_geofence_with_guardian(self.layer, perms, user=_user, group_perms=group_perms)
# All the other groups
if 'groups' in perm_spec and len(perm_spec['groups']) > 0:
for group, perms in perm_spec['groups'].items():
_group = Group.objects.get(name=group)
for perm in perms:
if self.polymorphic_ctype.name == 'layer' and perm in (
'change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, _group, self.layer)
else:
assign_perm(perm, _group, self.get_self_resource())
# Set the GeoFence Rules
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
if _group and _group.name and _group.name == 'anonymous':
_group = None
sync_geofence_with_guardian(self.layer, perms, group=_group)
# AnonymousUser
if 'users' in perm_spec and len(perm_spec['users']) > 0:
if "AnonymousUser" in perm_spec['users']:
_user = get_anonymous_user()
perms = perm_spec['users']["AnonymousUser"]
for perm in perms:
if self.polymorphic_ctype.name == 'layer' and perm in (
'change_layer_data', 'change_layer_style',
'add_layer', 'change_layer', 'delete_layer',):
assign_perm(perm, _user, self.layer)
else:
assign_perm(perm, _user, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, perms)
@transaction.atomic
def set_workflow_perms(self, approved=False, published=False):
"""
                     | N/PUBLISHED  | PUBLISHED
        ---------------------------------------------
        N/APPROVED   | GM/OWR       | -
        APPROVED     | registered   | all
        ---------------------------------------------
"""
anonymous_group = Group.objects.get(name='anonymous')
if approved:
if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
_members_group_name = groups_settings.REGISTERED_MEMBERS_GROUP_NAME
_members_group_group = Group.objects.get(name=_members_group_name)
for perm in VIEW_PERMISSIONS:
assign_perm(perm,
_members_group_group, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, VIEW_PERMISSIONS, group=_members_group_group)
else:
for perm in VIEW_PERMISSIONS:
assign_perm(perm,
anonymous_group, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, VIEW_PERMISSIONS)
if published:
for perm in VIEW_PERMISSIONS:
assign_perm(perm,
anonymous_group, self.get_self_resource())
# Set the GeoFence Rules (user = None)
if settings.OGC_SERVER['default'].get("GEOFENCE_SECURITY_ENABLED", False):
if self.polymorphic_ctype.name == 'layer':
sync_geofence_with_guardian(self.layer, VIEW_PERMISSIONS)
def get_user_perms(self, user):
"""
Returns a list of permissions a user has on a given resource
"""
# To avoid circular import
from geonode.base.models import Configuration
config = Configuration.load()
ctype = ContentType.objects.get_for_model(self)
PERMISSIONS_TO_FETCH = VIEW_PERMISSIONS + ADMIN_PERMISSIONS + LAYER_ADMIN_PERMISSIONS
resource_perms = Permission.objects.filter(
codename__in=PERMISSIONS_TO_FETCH,
content_type_id=ctype.id
).values_list('codename', flat=True)
# Don't filter for admin users
if not (user.is_superuser or user.is_staff):
user_model = get_user_obj_perms_model(self)
user_resource_perms = user_model.objects.filter(
object_pk=self.pk,
content_type_id=ctype.id,
user__username=str(user),
permission__codename__in=resource_perms
)
# get user's implicit perms for anyone flag
implicit_perms = get_perms(user, self)
resource_perms = user_resource_perms.union(
user_model.objects.filter(permission__codename__in=implicit_perms)
).values_list('permission__codename', flat=True)
# filter out permissions for edit, change or publish if readonly mode is active
perm_prefixes = ['change', 'delete', 'publish']
if config.read_only:
clauses = (Q(codename__contains=prefix) for prefix in perm_prefixes)
query = reduce(operator.or_, clauses)
if (user.is_superuser or user.is_staff):
resource_perms = resource_perms.exclude(query)
else:
perm_objects = Permission.objects.filter(codename__in=resource_perms)
resource_perms = perm_objects.exclude(query).values_list('codename', flat=True)
return resource_perms
def user_can(self, user, permission):
"""
        Checks if a user has a given permission on the resource
"""
resource = self.get_self_resource()
user_perms = self.get_user_perms(user).union(resource.get_user_perms(user))
if permission not in user_perms:
# TODO cater for permissions with syntax base.permission_codename
# eg 'base.change_resourcebase'
return False
return True
| gpl-3.0 | 5,339,928,737,404,739,000 | 42.785235 | 120 | 0.540773 | false |
paulthulstrup/moose | modules/thermopower_diffusion/thermopower_geometry.py | 1 | 8670 | import os, subprocess, re, sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Modify the mesh geometry for each run; for all of them we set:
# - T_fridge = 0.005
# - T_hot = 0.3
def writeMooseInput(mesh_n):
Values = {
'mesh_name': mesh_n
}
    # First part: read the template input file (input_file_geovar.txt) line by line
Lines = [line.rstrip('\n') for line in open('./input_file_geovar.txt')]
    # Mapping of {template line number: key of the value appended to that line}
Lines_to_change = {
'1': "mesh_name",
}
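    # Only template line 1 is rewritten: the chosen mesh file name plus a
    # closing quote is appended to it, so that line is assumed to end with an
    # opening quote (e.g. "file = '") in input_file_geovar.txt.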
filename = "./thermopower_diffusion.i"
os.remove(filename)
content = ''
for i in range(len(Lines)):
l = Lines[i]
key = str(i)
if key in Lines_to_change:
l += Values[Lines_to_change[key]] + "'"
content += l
content += '\n'
with open(filename, 'w+') as f2:
f2.write(content + os.linesep)
# Run the Moose simulation
def runMoose():
run_cmd = "sh ./run_sim_thermopower.sh"
subprocess.call(run_cmd, shell=True)
# Cleans a raw ncdump variable string and returns an array of floats
def clean_var(var):
    temp = re.sub('\n', '', var[0])  # strip newlines from the raw ncdump block
mylist = temp.split(',')
res = []
for i in range(len(mylist)):
s = mylist[i]
res.append(re.sub('[\s+]', '', s))
res = [float(i) for i in res]
return res
# Set up the list of mesh files and corresponding lengths to sweep over
# meshes = ['advanced_L_2.msh', 'advanced_L_4.msh', 'advanced_L_6.msh', 'advanced_L_9.msh',
# 'advanced_L_10.msh', 'advanced_L_11.msh', 'advanced_L_13.msh', 'advanced_L_20.msh',
# 'advanced_L_30.msh', 'advanced_L_40.msh', 'advanced_L_100.msh']
# meshes_length = [2, 4, 6, 9, 10, 11, 13, 20, 30, 40, 100]
meshes = ['rectangle2.msh', 'rectangle2-5.msh', 'rectangle3.msh', 'rectangle3-5.msh', 'rectangle4.msh', 'rectangle6.msh',
'rectangle8.msh', 'rectangle10.msh']
meshes_length = [2, 2.5, 3, 3.5, 4, 6, 8, 10]
result1 = []
result2 = []
result3 = []
result4 = []
result5 = []
for i in range(len(meshes)):
mesh = meshes[i]
writeMooseInput(mesh)
runMoose()
    # Loads the data from the ncdump output file
f = open("out.txt", 'r')
data = f.read()
x = re.findall(r'coordx =(.*?);', data, re.DOTALL)
x_node = clean_var(x)
y = re.findall(r'coordy =(.*?);', data, re.DOTALL)
y_node = clean_var(y)
nodes = np.array(zip(x_node, y_node))
T = re.findall(r'vals_nod_var1 =(.*?);', data, re.DOTALL)
val_T = np.sqrt(clean_var(T))
# Interpolation (Linear or Cubic)
# Need to define the domain properly on which we interpolate
from scipy.interpolate import griddata
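    # The branches below differ only in hard-coded numbers: the grid uses
    # roughly 50 points per unit of mesh length, and grid_T1 is then sampled
    # at fixed row/column indices to fill result1..result5.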
if meshes_length[i] == 2:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):100j,
min(y_node):max(y_node):100j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 50])
result2.append(grid_T1[30, 50])
result3.append(grid_T1[50, 50])
result4.append(grid_T1[70, 50])
result5.append(grid_T1[90, 50])
if meshes_length[i] == 2.5:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):125j,
min(y_node):max(y_node):125j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 62])
result2.append(grid_T1[30, 62])
result3.append(grid_T1[50, 62])
result4.append(grid_T1[70, 62])
result5.append(grid_T1[90, 62])
elif meshes_length[i] == 3:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):150j,
min(y_node):max(y_node):150j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 75])
result2.append(grid_T1[30, 75])
result3.append(grid_T1[50, 75])
result4.append(grid_T1[70, 75])
result5.append(grid_T1[90, 75])
elif meshes_length[i] == 3.5:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):175j,
min(y_node):max(y_node):175j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 87])
result2.append(grid_T1[30, 87])
result3.append(grid_T1[50, 87])
result4.append(grid_T1[70, 87])
result5.append(grid_T1[90, 87])
elif meshes_length[i] == 4:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):200j,
min(y_node):max(y_node):200j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 100])
result2.append(grid_T1[30, 100])
result3.append(grid_T1[50, 100])
result4.append(grid_T1[70, 100])
result5.append(grid_T1[90, 100])
elif meshes_length[i] == 6:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):300j,
min(y_node):max(y_node):300j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 150])
result2.append(grid_T1[30, 150])
result3.append(grid_T1[50, 150])
result4.append(grid_T1[70, 150])
result5.append(grid_T1[90, 150])
elif meshes_length[i] == 8:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):400j,
min(y_node):max(y_node):400j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 200])
result2.append(grid_T1[30, 200])
result3.append(grid_T1[50, 200])
result4.append(grid_T1[70, 200])
result5.append(grid_T1[90, 200])
elif meshes_length[i] == 9:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):450j,
min(y_node):max(y_node):450j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[33, 225])
elif meshes_length[i] == 10:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):500j,
min(y_node):max(y_node):500j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[10, 250])
result2.append(grid_T1[30, 250])
result3.append(grid_T1[50, 250])
result4.append(grid_T1[70, 250])
result5.append(grid_T1[90, 250])
elif meshes_length[i] == 11:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):550j,
min(y_node):max(y_node):550j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[33, 275])
elif meshes_length[i] == 13:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):650j,
min(y_node):max(y_node):650j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[33, 325])
elif meshes_length[i] == 20:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):1000j,
min(y_node):max(y_node):1000j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 500])
elif meshes_length[i] == 30:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):1500j,
min(y_node):max(y_node):1500j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 750])
elif meshes_length[i] == 40:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):2000j,
min(y_node):max(y_node):2000j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 1000])
elif meshes_length[i] == 100:
grid_x, grid_y = np.mgrid[min(x_node):max(x_node):5000j,
min(y_node):max(y_node):5000j] # here we manually define the range of the mesh
grid_T1 = griddata(nodes, val_T, (grid_x, grid_y), method='cubic')
result1.append(grid_T1[12, 2500])
print result5
| lgpl-2.1 | -392,985,355,593,617,200 | 36.695652 | 121 | 0.57451 | false |
bzhou26/leetcode_sol | p20_Valid_Parentheses.py | 1 | 1041 | '''
- Leetcode problem: 20
- Difficulty: Easy
- Brief problem description:
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Note that an empty string is also considered valid.
Example 1:
Input: "()"
Output: true
Example 2:
Input: "()[]{}"
Output: true
Example 3:
Input: "(]"
Output: false
Example 4:
Input: "([)]"
Output: false
Example 5:
Input: "{[]}"
Output: true
- Solution Summary: push the expected closing bracket for each opening bracket;
  a closing bracket is valid only if it matches the top of the stack.
- Used Resources:
--- Bo Zhou
'''
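# Example usage (illustrative):
#   Solution().isValid("()[]{}")  # -> True
#   Solution().isValid("([)]")    # -> False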
class Solution:
def isValid(self, s: str) -> bool:
pStack = []
for c in s:
if c == "{":
pStack.append("}")
elif c == "[":
pStack.append("]")
elif c == "(":
pStack.append(")")
elif len(pStack) == 0 or pStack.pop() != c:
return False
return len(pStack) == 0 | mit | -6,117,799,411,657,766,000 | 16.965517 | 118 | 0.558117 | false |
ryanGT/sympy | sympy/polys/wrappers.py | 1 | 2095 |
from polynomial import Poly
def LexPoly(*args):
"""Returns a polynomial with lexicographic order of terms. """
return Poly(*args, **{ 'order' : 'lex' })
from algorithms import poly_div, poly_pdiv, poly_groebner, poly_lcm, poly_gcd, \
poly_half_gcdex, poly_gcdex, poly_sqf, poly_resultant, poly_subresultants, \
poly_decompose, poly_quo, poly_rem, poly_pquo, poly_prem
from rootfinding import poly_root_factors, poly_sturm
def _conv_args(n, args):
symbols = args[n:]
if len(symbols) == 1 and isinstance(symbols[0], (tuple, list)):
return args[:n] + tuple(symbols[0])
else:
return args
def _map_basic(f, n, *args, **kwargs):
result = f(*_conv_args(n, args), **kwargs)
if isinstance(result, (list, tuple, set)):
return result.__class__(g.as_basic() for g in result)
else:
return result.as_basic()
_funcs = {
'quo' : 2,
'rem' : 2,
'pdiv' : 2,
'pquo' : 2,
'prem' : 2,
'groebner' : 1,
'lcm' : 2,
'gcd' : 2,
'gcdex' : 2,
'half_gcdex' : 2,
'resultant' : 2,
'sqf' : 1,
'decompose' : 1,
'root_factors' : 1,
'sturm' : 1,
}
_func_def = \
"""
def %s(*args, **kwargs):
return _map_basic(poly_%s, %d, *args, **kwargs)
%s.__doc__ = poly_%s.__doc__
"""
for _func, _n in _funcs.iteritems():
exec _func_def % (_func, _func, _n, _func, _func)
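# The loop above generates thin public wrappers (quo, rem, gcd, sturm, ...)
# that forward to the corresponding poly_* routines and convert their results
# back to plain expressions via _map_basic.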
def div(*args, **kwargs):
q, r = poly_div(*_conv_args(2, args), **kwargs)
if type(q) is not list:
q = q.as_basic()
else:
q = [ p.as_basic() for p in q ]
return q, r.as_basic()
div.__doc__ = poly_div.__doc__
def subresultants(*args, **kwargs):
result = poly_subresultants(*_conv_args(2, args), **kwargs)
if type(result) is tuple:
res, R = result
else:
res, R = None, result
R = [ r.as_basic() for r in R ]
if res is None:
return R
else:
return res.as_basic(), R
subresultants.__doc__ = poly_subresultants.__doc__
| bsd-3-clause | 5,509,448,400,583,074,000 | 23.08046 | 80 | 0.528401 | false |
syilmazturk/ZomatoNightlifeMapper | test/test_zomato_mapper_dialog.py | 1 | 1573 | # coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__date__ = '2016-06-16'
__copyright__ = 'Copyright 2016, Serhat YILMAZTURK'
import unittest
from PyQt4.QtGui import QDialogButtonBox, QDialog
from zomato_mapper_dialog import ZomatoNightlifeMapperDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class ZomatoNightlifeMapperDialogTest(unittest.TestCase):
"""Test dialog works."""
def setUp(self):
"""Runs before each test."""
self.dialog = ZomatoNightlifeMapperDialog(None)
def tearDown(self):
"""Runs after each test."""
self.dialog = None
def test_dialog_ok(self):
"""Test we can click OK."""
button = self.dialog.button_box.button(QDialogButtonBox.Ok)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Accepted)
def test_dialog_cancel(self):
"""Test we can click cancel."""
button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Rejected)
if __name__ == "__main__":
suite = unittest.makeSuite(ZomatoNightlifeMapperDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| mit | -8,329,230,998,786,625,000 | 27.6 | 78 | 0.678322 | false |
openstack/python-designateclient | designateclient/v2/cli/service_statuses.py | 1 | 2982 | # Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Endre Karlson <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from osc_lib.command import command
from designateclient import utils
from designateclient.v2.cli import common
from designateclient.v2 import utils as v2_utils
LOG = logging.getLogger(__name__)
def _format_status(status):
status.pop("links", None)
# Remove unneeded fields for display output formatting
for k in ("capabilities", "stats"):
status[k] = "\n".join(status[k]) if status[k] else "-"
return status
class ListServiceStatusesCommand(command.Lister):
"""List service statuses"""
columns = ['id', 'hostname', 'service_name', 'status', 'stats',
'capabilities']
def get_parser(self, prog_name):
parser = super(ListServiceStatusesCommand, self).get_parser(prog_name)
parser.add_argument("--hostname", help="Hostname", required=False)
parser.add_argument("--service_name", help="Service Name",
required=False)
parser.add_argument("--status", help="Status", required=False)
common.add_all_common_options(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.dns
common.set_all_common_headers(client, parsed_args)
cols = self.columns
criterion = {}
for i in ["hostname", "service_name", "status"]:
v = getattr(parsed_args, i)
if v is not None:
criterion[i] = v
data = v2_utils.get_all(client.service_statuses.list,
criterion=criterion)
for i, s in enumerate(data):
data[i] = _format_status(s)
return cols, (utils.get_item_properties(s, cols) for s in data)
class ShowServiceStatusCommand(command.ShowOne):
"""Show service status details"""
def get_parser(self, prog_name):
parser = super(ShowServiceStatusCommand, self).get_parser(prog_name)
parser.add_argument('id', help="Service Status ID")
common.add_all_common_options(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.dns
common.set_all_common_headers(client, parsed_args)
data = client.service_statuses.get(parsed_args.id)
_format_status(data)
return zip(*sorted(data.items()))
| apache-2.0 | -2,124,062,861,226,373,000 | 31.064516 | 78 | 0.65996 | false |
rfancn/wxgigo | contrib/admin/admin/plugin/views.py | 1 | 2834 | #!/usr/bin/env python
# coding=utf-8
"""
Copyright (C) 2010-2013, Ryan Fan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import absolute_import
import json
import logging
from django.http import HttpResponse
from django.views.generic import View, TemplateView
from contrib.admin import celery_call
logger = logging.getLogger(__name__)
class WXMPPluginActionView(View):
EMPTY_RESULT = HttpResponse(json.dumps(None), content_type="application/json")
def get_meta(self):
plugins_meta_list = celery_call('api.plugin.load_meta')
if not isinstance(plugins_meta_list, list):
logger.debug(plugins_meta_list)
raise Exception("Failed to load plugin meta list!")
return HttpResponse(json.dumps(plugins_meta_list), content_type="application/json")
def get_config(self, plugin_name):
config_list = celery_call('api.plugin.load_config', (plugin_name,))
if not isinstance(config_list, list):
logger.debug(config_list)
raise Exception("Failed to load plugin config list!")
return HttpResponse(json.dumps(config_list), content_type="application/json")
def get(self, request):
"""
Handle get requests
"""
if request.path.endswith('load_meta/'):
return self.get_meta()
elif request.path.endswith('load_config/'):
plugin_name = request.GET.get('name')
if not plugin_name:
return self.EMPTY_RESULT
return self.get_config(plugin_name)
return self.EMPTY_RESULT
def post(self, request):
"""
Save plugin meta or config
"""
http_post = request.POST
logger.debug(type(http_post))
if not isinstance(http_post, dict):
raise Exception("Invalid plugin meta info!")
response = celery_call('api.plugin.save', (http_post,))
return HttpResponse(json.dumps(response), content_type="application/json")
class PluginListView(TemplateView):
template_name = "plugin_list.html"
| mit | -4,528,838,298,368,841,700 | 33.425 | 91 | 0.659845 | false |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/lib2to3/tests/data/py2_test_grammar.py | 1 | 31953 | # Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEquals(x, 2, 'backslash for line continuation')
        # Backslash does not mean continuation in comments :\
x = 0
self.assertEquals(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEquals(0xff, 255)
self.assertEquals(0377, 255)
self.assertEquals(2147483647, 017777777777)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -020000000000)
# XXX -2147483648
self.assert_(037777777777 > 0)
self.assert_(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
self.assert_(01777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assert_(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assert_(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEquals(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEquals(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEquals(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | *' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEquals(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEquals(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEquals(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEquals(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
d31v(1)
def d32v((x,)): pass
d32v((1,))
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEquals(l5(1, 2), 5)
self.assertEquals(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
            # verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
self.assertEqual(sys.stdout.getvalue(), '''\
1 2 3
1 2 3
1 1 1
1 2 3
1 2 3
1 1 1
hello world
''')
sys.stdout = save_stdout
# syntax errors
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
        # *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testExec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEquals(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
self.assertEqual(`1,2`, '(1, 2)')
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x):
x.decorated = True
return x
@class_decorator
class G:
pass
self.assertEqual(G.decorated, True)
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
try:
g.next()
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
| mit | -8,453,385,036,111,973,000 | 30.805955 | 132 | 0.460207 | false |
manns/pyspread | pyspread/lib/attrdict.py | 1 | 1060 | # -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
class AttrDict(dict):
"""Dictionary with attribute access"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
| gpl-3.0 | -3,356,064,808,967,428,600 | 36.857143 | 70 | 0.619811 | false |
google/earthengine-api | python/ee/_cloud_api_utils.py | 1 | 26796 | #!/usr/bin/env python
"""Earth Engine helper functions for working with the Cloud API.
Many of the functions defined here are for mapping legacy calls in ee.data into
their new Cloud API equivalents. This generally requires remapping call
parameters and result values.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import calendar
import copy
import datetime
import re
import warnings
from . import ee_exception
from google_auth_httplib2 import AuthorizedHttp
from google_auth_httplib2 import Request
from googleapiclient import discovery
from googleapiclient import http
from googleapiclient import model
# We use the urllib3-aware shim if it's available.
# It is not available by default if the package is installed via the conda-forge
# channel.
# pylint: disable=g-bad-import-order,g-import-not-at-top
try:
import httplib2shim as httplib2
except ImportError:
import httplib2
import six
# pylint: enable=g-bad-import-order,g-import-not-at-top
# The Cloud API version.
VERSION = 'v1alpha'
PROJECT_ID_PATTERN = (r'^(?:\w+(?:[\w\-]+\.[\w\-]+)*?\.\w+\:)?'
r'[a-z][-a-z0-9]{4,28}[a-z0-9]$')
ASSET_NAME_PATTERN = (r'^projects/((?:\w+(?:[\w\-]+\.[\w\-]+)*?\.\w+\:)?'
r'[a-z][a-z0-9\-]{4,28}[a-z0-9])/assets/(.*)$')
ASSET_ROOT_PATTERN = (r'^projects/((?:\w+(?:[\w\-]+\.[\w\-]+)*?\.\w+\:)?'
r'[a-z][a-z0-9\-]{4,28}[a-z0-9])/assets/?$')
# The default user project to use when making Cloud API calls.
_cloud_api_user_project = None
def _wrap_request(headers_supplier, response_inspector):
"""Builds a callable that wraps an API request.
Args:
headers_supplier: If not None, this will be called for each request and the
resulting dict incorporated into that request's HTTP headers.
response_inspector: If not None, this will be called with an
httplib2.Response containing the HTTP response and body content.
The call happens no matter what the HTTP response status was.
Returns:
Something that can be called in place of the http.HttpRequest constructor
to build an HttpRequest.
"""
if headers_supplier is None and response_inspector is None:
return http.HttpRequest
# pylint: disable=invalid-name
def builder(http_transport,
postproc,
uri,
method='GET',
body=None,
headers=None,
methodId=None,
resumable=None):
"""Builds an HttpRequest, adding headers and response inspection."""
additional_headers = headers_supplier()
if additional_headers:
headers = headers.copy() if headers else {}
headers.update(additional_headers)
request = http.HttpRequest(
http_transport,
postproc,
uri,
method=method,
body=body,
headers=headers,
methodId=methodId,
resumable=resumable)
if response_inspector:
request.add_response_callback(response_inspector)
return request
return builder
def set_cloud_api_user_project(cloud_api_user_project):
global _cloud_api_user_project
_cloud_api_user_project = cloud_api_user_project
def build_cloud_resource(api_base_url,
api_key=None,
credentials=None,
timeout=None,
headers_supplier=None,
response_inspector=None,
http_transport=None,
raw=False):
"""Builds an Earth Engine Cloud API resource.
Args:
api_base_url: The base URL of the cloud endpoints.
api_key: An API key that's enabled for use with the Earth Engine Cloud API.
credentials: OAuth2 credentials to use when authenticating to the API.
timeout: How long a timeout to set on requests, in seconds.
headers_supplier: A callable that will return a set of headers to be applied
to a request. Will be called once for each request.
response_inspector: A callable that will be invoked with the raw
httplib2.Response responses.
http_transport: An optional custom http_transport to use.
raw: Whether or not to return raw bytes when making method requests.
Returns:
A resource object to use to call the Cloud API.
"""
discovery_service_url = (
'{}/$discovery/rest?version={}&prettyPrint=false'
.format(api_base_url, VERSION))
if http_transport is None:
http_transport = httplib2.Http(timeout=timeout)
if credentials is not None:
http_transport = AuthorizedHttp(credentials, http=http_transport)
request_builder = _wrap_request(headers_supplier, response_inspector)
# Discovery uses json by default.
if raw:
alt_model = model.RawModel()
else:
alt_model = None
def build(**kwargs):
return discovery.build(
'earthengine',
VERSION,
discoveryServiceUrl=discovery_service_url,
developerKey=api_key,
http=http_transport,
requestBuilder=request_builder,
model=alt_model,
cache_discovery=False,
**kwargs) # pytype: disable=wrong-keyword-args
try:
# google-api-python-client made static_discovery the default in version 2,
# but it's not backward-compatible. There's no reliable way to check the
# package version, either.
resource = build(static_discovery=False)
except TypeError:
resource = build()
resource._baseUrl = api_base_url
return resource
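# Illustrative usage of build_cloud_resource (a hedged sketch; the base URL and
# the credentials object are assumptions for this example, not values defined
# in this module):
#
#   resource = build_cloud_resource(
#       'https://earthengine.googleapis.com', credentials=my_credentials)
#
# The returned object is a standard googleapiclient discovery Resource, so RPCs
# are invoked through its generated collections and executed with .execute().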
def build_cloud_resource_from_document(discovery_document,
http_transport=None,
headers_supplier=None,
response_inspector=None):
"""Builds an Earth Engine Cloud API resource from a description of the API.
This version is intended for use in tests.
Args:
discovery_document: The description of the API.
http_transport: An HTTP transport object to use for the call.
headers_supplier: A callable that will return a set of headers to be applied
to a request. Will be called once for each request.
response_inspector: A callable that will be invoked with the raw
httplib2.Response responses.
Returns:
A resource object to use to call the Cloud API.
"""
request_builder = _wrap_request(headers_supplier, response_inspector)
return discovery.build_from_document(
discovery_document,
http=http_transport,
requestBuilder=request_builder)
def _convert_dict(to_convert,
conversions,
defaults=None,
key_warnings=False,
retain_keys=False):
"""Applies a set of conversion rules to a dict.
Args:
to_convert: A dictionary of key/value pairs to convert.
conversions: A dictionary giving the mapping from key names in "to_convert"
to how those keys and their values will be handled. Key/value pairs in
"to_convert" will be modified in a way that depends on how the key
appears in "conversions". If "to_convert" contains a key/value mapping
of "k"->"v", then:
- If "conversions" contains "k"->"X" then the result will contain
"X"->"v".
- If "conversions" contains "k"->None then the result will not contain an
entry for "k".
- If "conversions" contains "k"->("X", f) then the result will contain
"X"->f("v")
- If "conversions" does not contain an entry for "k" then the result
will not contain an entry for "k" unless retain_keys is true;
if key_warnings is True then a warning will be printed.
- If two or more distinct input keys are converted to the same output key,
one of the resulting values will appear in the result, the others
will be dropped, and a warning will be printed.
defaults: Values to insert in the result if the result of conversion does
not contain these keys.
key_warnings: Whether to print warnings for input keys that are not mapped
to anything in the output.
    retain_keys: Whether to retain keys that do not appear in "conversions". If
      False, any keys that don't show up in the conversions dict will be
      dropped from the result.
Returns:
The "to_convert" dict with keys renamed, values converted, and defaults
added.
"""
result = {}
for key, value in six.iteritems(to_convert):
if key in conversions:
conversion = conversions[key]
if conversion is not None:
if isinstance(conversion, tuple):
key = conversion[0]
value = conversion[1](value)
else:
key = conversion
if key in result:
warnings.warn(
'Multiple request parameters converted to {}'.format(key))
result[key] = value
elif retain_keys:
result[key] = value
elif key_warnings:
warnings.warn('Unrecognized key {} ignored'.format(key))
if defaults:
for default_key, default_value in six.iteritems(defaults):
if default_key not in result:
result[default_key] = default_value
return result
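# Illustrative example of the conversion rules above (hypothetical keys, shown
# as a comment so module behaviour is unchanged):
#
#   _convert_dict({'id': 'abc', 'num': 5, 'extra': 1},
#                 {'id': ('name', str.upper), 'num': 'pageSize'},
#                 defaults={'view': 'BASIC'}, key_warnings=True)
#
# returns {'name': 'ABC', 'pageSize': 5, 'view': 'BASIC'}; 'extra' is dropped
# with an "Unrecognized key" warning because it has no conversion entry.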
def _convert_value(value, conversions, default):
"""Converts a value using a set of value mappings.
Args:
value: The value to convert.
conversions: A dict giving the desired output for each of a set of possible
input values.
default: The value to return if the input value is not one of the ones
listed in "conversions".
Returns:
The converted value.
"""
return conversions.get(value, default)
def _convert_msec_to_timestamp(time_msec):
"""Converts a time value to a google.protobuf.Timestamp's string form.
Args:
time_msec: A time in msec since the Unix epoch.
Returns:
A string formatted like '2003-09-07T19:30:12.345Z', which is the expected
form of google.protobuf.Timestamp values.
"""
return datetime.datetime.utcfromtimestamp(
time_msec / 1000.0).isoformat() + 'Z'
def _convert_timestamp_to_msec(timestamp):
"""Converts a google.protobuf.Timestamp's string form to a time in msec.
Args:
timestamp: A string formatted like '2003-09-07T19:30:12.345Z', which is the
expected form of google.protobuf.Timestamp values.
Returns:
A time in msec since the Unix epoch.
"""
# The fractional second part is optional. Sigh.
if '.' in timestamp:
parsed_timestamp = datetime.datetime.strptime(
timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
parsed_timestamp = datetime.datetime.strptime(
timestamp, '%Y-%m-%dT%H:%M:%SZ')
return (calendar.timegm(parsed_timestamp.utctimetuple()) * 1000 +
int(parsed_timestamp.microsecond / 1000))
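# Example (values assumed for illustration): the timestamp string
# '2003-09-07T19:30:12.345Z' converts to 1062963012345 msec since the epoch,
# while _convert_msec_to_timestamp maps msec back to an RFC 3339-style string
# with six-digit fractional seconds (e.g. '...T19:30:12.345000Z').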
def _convert_bounding_box_to_geo_json(bbox):
"""Converts a lng/lat bounding box to a GeoJSON string."""
lng_min = bbox[0]
lat_min = bbox[1]
lng_max = bbox[2]
lat_max = bbox[3]
return ('{{"type":"Polygon","coordinates":'
'[[[{0},{1}],[{2},{1}],[{2},{3}],[{0},{3}],[{0},{1}]]]}}'.format(
lng_min, lat_min, lng_max, lat_max))
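# Example: _convert_bounding_box_to_geo_json([-10, -5, 10, 5]) returns
# '{"type":"Polygon","coordinates":[[[-10,-5],[10,-5],[10,5],[-10,5],[-10,-5]]]}'
# (the ring is closed by repeating the first corner).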
def convert_get_list_params_to_list_assets_params(params):
"""Converts a getList params dict to something usable with listAssets."""
return _convert_dict(
params, {
'id': ('parent', convert_asset_id_to_asset_name),
'num': 'pageSize'
}, key_warnings=True)
def convert_list_assets_result_to_get_list_result(result):
"""Converts a listAssets result to something getList can return."""
if 'assets' not in result:
return []
return [_convert_asset_for_get_list_result(i) for i in result['assets']]
def convert_get_list_params_to_list_images_params(params):
"""Converts a getList params dict to something usable with listImages."""
params = _convert_dict(
params, {
'id': ('parent', convert_asset_id_to_asset_name),
'num': 'pageSize',
'starttime': ('startTime', _convert_msec_to_timestamp),
'endtime': ('endTime', _convert_msec_to_timestamp),
'bbox': ('region', _convert_bounding_box_to_geo_json),
'region': 'region',
'filter': 'filter'
},
key_warnings=True)
# getList returns minimal information; we can filter unneeded stuff out
# server-side.
params['view'] = 'BASIC'
return params
def is_asset_root(asset_name):
return bool(re.match(ASSET_ROOT_PATTERN, asset_name))
def convert_list_images_result_to_get_list_result(result):
"""Converts a listImages result to something getList can return."""
if 'images' not in result:
return []
return [_convert_image_for_get_list_result(i) for i in result['images']]
def _convert_asset_for_get_list_result(asset):
"""Converts an EarthEngineAsset to the format returned by getList."""
result = _convert_dict(
asset, {
'name': 'id',
'type': ('type', _convert_asset_type_for_get_list_result)
},
defaults={'type': 'Unknown'})
return result
def _convert_image_for_get_list_result(asset):
"""Converts an Image to the format returned by getList."""
result = _convert_dict(
asset, {
'name': 'id',
},
defaults={'type': 'Image'})
return result
def _convert_asset_type_for_get_list_result(asset_type):
"""Converts an EarthEngineAsset.Type to the format returned by getList."""
return _convert_value(
asset_type, {
'IMAGE': 'Image',
'IMAGE_COLLECTION': 'ImageCollection',
'TABLE': 'Table',
'FOLDER': 'Folder'
}, 'Unknown')
def convert_asset_type_for_create_asset(asset_type):
"""Converts a createAsset asset type to an EarthEngineAsset.Type."""
return _convert_value(
asset_type, {
'Image': 'IMAGE',
'ImageCollection': 'IMAGE_COLLECTION',
'Table': 'TABLE',
'Folder': 'FOLDER'
}, asset_type)
def convert_asset_id_to_asset_name(asset_id):
"""Converts an internal asset ID to a Cloud API asset name.
If asset_id already matches the format 'projects/*/assets/**', it is returned
as-is.
Args:
asset_id: The asset ID to convert.
Returns:
An asset name string in the format 'projects/*/assets/**'.
"""
if re.match(ASSET_NAME_PATTERN, asset_id) or is_asset_root(asset_id):
return asset_id
elif asset_id.split('/')[0] in ['users', 'projects']:
return 'projects/earthengine-legacy/assets/{}'.format(asset_id)
else:
return 'projects/earthengine-public/assets/{}'.format(asset_id)
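# Examples (asset IDs are hypothetical):
#   convert_asset_id_to_asset_name('users/alice/my_asset')
#     -> 'projects/earthengine-legacy/assets/users/alice/my_asset'
#   convert_asset_id_to_asset_name('LANDSAT/LC08/C01/T1')
#     -> 'projects/earthengine-public/assets/LANDSAT/LC08/C01/T1'
#   convert_asset_id_to_asset_name('projects/my-project/assets/some/thing')
#     -> returned unchanged, since it already matches ASSET_NAME_PATTERN.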
def split_asset_name(asset_name):
"""Splits an asset name into the parent and ID parts.
Args:
asset_name: The asset ID to split, in the form 'projects/*/assets/**'.
Returns:
The parent ('projects/*') and ID ('**') parts of the name.
"""
projects, parent, _, remainder = asset_name.split('/', 3)
return projects + '/' + parent, remainder
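# Example (names hypothetical): split_asset_name('projects/my-project/assets/folder/image')
# returns ('projects/my-project', 'folder/image').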
def convert_operation_name_to_task_id(operation_name):
"""Converts an Operation name to a task ID."""
found = re.search(r'^.*operations/(.*)$', operation_name)
return found.group(1) if found else operation_name
def convert_task_id_to_operation_name(task_id):
"""Converts a task ID to an Operation name."""
return 'projects/{}/operations/{}'.format(_cloud_api_user_project, task_id)
def convert_params_to_image_manifest(params):
"""Converts params to an ImageManifest for ingestion."""
return _convert_dict(
params, {
'id': ('name', convert_asset_id_to_asset_name),
'tilesets': ('tilesets', convert_tilesets_to_one_platform_tilesets)
},
retain_keys=True)
def convert_params_to_table_manifest(params):
"""Converts params to a TableManifest for ingestion."""
return _convert_dict(
params, {
'id': ('name', convert_asset_id_to_asset_name),
'sources': ('sources', convert_sources_to_one_platform_sources),
},
retain_keys=True)
def convert_tilesets_to_one_platform_tilesets(tilesets):
"""Converts a tileset to a one platform representation of a tileset."""
converted_tilesets = []
for tileset in tilesets:
converted_tileset = _convert_dict(
tileset,
{'sources': ('sources', convert_sources_to_one_platform_sources)},
retain_keys=True)
converted_tilesets.append(converted_tileset)
return converted_tilesets
def convert_sources_to_one_platform_sources(sources):
"""Converts the sources to one platform representation of sources."""
converted_sources = []
for source in sources:
converted_source = copy.deepcopy(source)
if 'primaryPath' in converted_source:
file_sources = [converted_source['primaryPath']]
if 'additionalPaths' in converted_source:
file_sources += converted_source['additionalPaths']
del converted_source['additionalPaths']
del converted_source['primaryPath']
converted_source['uris'] = file_sources
if 'maxError' in converted_source:
converted_source['maxErrorMeters'] = converted_source['maxError']
del converted_source['maxError']
converted_sources.append(converted_source)
return converted_sources
def encode_number_as_cloud_value(number):
# Numeric values in constantValue-style nodes end up stored in doubles. If the
# input is an integer that loses precision as a double, use the int64 slot
# ("integerValue") in ValueNode.
if (isinstance(number, six.integer_types) and float(number) != number):
return {'integerValue': str(number)}
else:
return {'constantValue': number}
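# Example: 42 and 1.5 are exactly representable as doubles and become
# {'constantValue': 42} and {'constantValue': 1.5}, while 2**53 + 1 loses
# precision as a double and becomes {'integerValue': '9007199254740993'}.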
def convert_algorithms(algorithms):
"""Converts a ListAlgorithmsResult to the internal format.
The internal code expects a dict mapping each algorithm's name to a dict
containing:
- description: string
- returns: string
- arguments: list of dicts, each containing
- name: argument name
- type: argument type
- description: argument description (optional)
- optional: bool (optional)
- default: default value (optional)
- hidden: bool (optional)
- preview: bool (optional)
- deprecated: string containing deprecation reason (optional)
Args:
    algorithms: A ListAlgorithmsResult.
Returns:
A version of that algorithms list that can be interpreted by
apifunction.initialize().
"""
return dict(
_convert_algorithm(algorithm) for algorithm in algorithms['algorithms'])
def _convert_algorithm(algorithm):
"""Converts an Algorithm to the internal format."""
# Strip leading 'algorithms/' from the name.
algorithm_name = algorithm['name'][11:]
converted_algorithm = _convert_dict(
algorithm, {
'description': 'description',
'returnType': 'returns',
'arguments': ('args', _convert_algorithm_arguments),
'hidden': 'hidden',
'preview': 'preview'
},
defaults={
'description': '',
'returns': '',
'args': []
})
if algorithm.get('deprecated'):
converted_algorithm['deprecated'] = algorithm.get('deprecationReason', '')
return algorithm_name, converted_algorithm
def _convert_algorithm_arguments(args):
return [_convert_algorithm_argument(arg) for arg in args]
def _convert_algorithm_argument(arg):
return _convert_dict(
arg, {
'argumentName': 'name',
'type': 'type',
'description': 'description',
'optional': 'optional',
'defaultValue': 'default'
},
defaults={
'description': '',
'type': ''
})
def convert_to_image_file_format(format_str):
"""Converts a legacy file format string to an ImageFileFormat enum value.
Args:
format_str: A string describing an image file format that was passed to
one of the functions in ee.data that takes image file formats.
Returns:
A best guess at the corresponding ImageFileFormat enum name.
"""
if format_str is None:
return 'AUTO_JPEG_PNG'
format_str = format_str.upper()
if format_str == 'JPG':
return 'JPEG'
elif format_str == 'AUTO':
return 'AUTO_JPEG_PNG'
elif format_str == 'GEOTIFF':
return 'GEO_TIFF'
elif format_str == 'TFRECORD':
return 'TF_RECORD_IMAGE'
else:
# It's probably "JPEG" or "PNG", but might be some other supported format.
# Let the server validate it.
return format_str
def convert_to_table_file_format(format_str):
"""Converts a legacy file format string to a TableFileFormat enum value.
Args:
format_str: A string describing a table file format that was passed to
one of the functions in ee.data that takes table file formats.
Returns:
A best guess at the corresponding TableFileFormat enum name.
"""
format_str = format_str.upper()
if format_str == 'GEOJSON':
return 'GEO_JSON'
elif format_str == 'TFRECORD':
return 'TF_RECORD_TABLE'
else:
# It's probably "CSV" or "KML" or one of the others.
# Let the server validate it.
return format_str
def convert_to_band_list(bands):
"""Converts a band list, possibly as CSV, to a real list of bands.
Args:
bands: A list of strings containing band names, or a string containing
a comma-separated list of band names, or None.
Returns:
A list of band names.
"""
if bands is None:
return []
elif isinstance(bands, six.string_types):
return bands.split(',')
elif isinstance(bands, list):
return bands
else:
raise ee_exception.EEException('Invalid band list ' + bands)
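# Examples (band names illustrative): convert_to_band_list('B4,B3,B2') returns
# ['B4', 'B3', 'B2']; convert_to_band_list(None) returns []; and
# convert_to_band_list(['B8']) returns ['B8'] unchanged.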
def convert_to_visualization_options(params):
"""Extracts a VisualizationOptions from a param dict.
Args:
params: See ee.data.getMapId() for the description of the keys and values
that might appear here.
Returns:
A VisualizationOptions proto, in dict form.
"""
result = {}
if 'palette' in params:
palette = params['palette']
if isinstance(palette, six.string_types):
palette = palette.split(',')
result['paletteColors'] = palette
value_range = len(palette) - 1
else:
value_range = 255
ranges = []
if 'gain' in params or 'bias' in params:
if 'min' in params or 'max' in params:
raise ee_exception.EEException(
'Gain and bias can\'t be specified together with min and max')
# The Cloud API doesn't support gain/bias, only min/max. Extract and
# convert.
gains = _convert_csv_numbers_to_list(params.get('gain'))
biases = _convert_csv_numbers_to_list(params.get('bias'))
if not gains:
gains = [1.0] * len(biases)
elif not biases:
biases = [0.0] * len(gains)
elif len(gains) != len(biases):
raise ee_exception.EEException('Length of gain and bias must match.')
for gain, bias in zip(gains, biases):
# The transformation equations are
# x -> x * gain + bias
# x -> range * (x - min) / (max - min)
# Solving for (min, max) given (gain, bias) gives:
range_min = -bias / gain
range_max = value_range / gain + range_min
ranges.append({'min': range_min, 'max': range_max})
elif 'min' in params or 'max' in params:
mins = _convert_csv_numbers_to_list(params.get('min'))
maxes = _convert_csv_numbers_to_list(params.get('max'))
if not mins:
mins = [0.0] * len(maxes)
elif not maxes:
maxes = [1.0] * len(mins)
elif len(mins) != len(maxes):
raise ee_exception.EEException('Length of min and max must match.')
for range_min, range_max in zip(mins, maxes):
ranges.append({'min': range_min, 'max': range_max})
if ranges:
result['ranges'] = ranges
gammas = _convert_csv_numbers_to_list(params.get('gamma'))
if len(gammas) > 1:
raise ee_exception.EEException('Only one gamma value is supported.')
elif gammas:
result['gamma'] = {'value': gammas[0]}
return result
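# Worked example of the gain/bias conversion above (hypothetical parameters):
# with no palette, value_range is 255, so {'gain': '2', 'bias': '1'} gives
# range_min = -1/2 = -0.5 and range_max = 255/2 - 0.5 = 127.0, i.e. the result
# is {'ranges': [{'min': -0.5, 'max': 127.0}]}.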
def _convert_csv_numbers_to_list(value):
"""Converts a string containing CSV numbers to a list."""
if not value:
return []
return [float(x) for x in value.split(',')]
def convert_operation_to_task(operation):
"""Converts an Operation to a legacy Task."""
result = _convert_dict(
operation['metadata'], {
'createTime': ('creation_timestamp_ms', _convert_timestamp_to_msec),
'updateTime': ('update_timestamp_ms', _convert_timestamp_to_msec),
'startTime': ('start_timestamp_ms', _convert_timestamp_to_msec),
'attempt': 'attempt',
'state': ('state', _convert_operation_state_to_task_state),
'description': 'description',
'type': 'task_type',
'destinationUris': 'destination_uris',
})
if operation.get('done'):
if 'error' in operation:
result['error_message'] = operation['error']['message']
result['id'] = convert_operation_name_to_task_id(operation['name'])
result['name'] = operation['name']
return result
def _convert_operation_state_to_task_state(state):
"""Converts a state string from an Operation to the Task equivalent."""
return _convert_value(
state, {
'PENDING': 'READY',
'RUNNING': 'RUNNING',
'CANCELLING': 'CANCEL_REQUESTED',
'SUCCEEDED': 'COMPLETED',
'CANCELLED': 'CANCELLED',
'FAILED': 'FAILED'
}, 'UNKNOWN')
def convert_iam_policy_to_acl(policy):
"""Converts an IAM Policy proto to the legacy ACL format."""
bindings = {
binding['role']: binding.get('members', [])
for binding in policy.get('bindings', [])
}
owners = bindings.get('roles/owner', [])
readers = bindings.get('roles/viewer', [])
writers = bindings.get('roles/editor', [])
if 'allUsers' in readers:
all_users_can_read = True
readers.remove('allUsers')
else:
all_users_can_read = False
result = {'owners': owners, 'readers': readers, 'writers': writers}
if all_users_can_read:
result['all_users_can_read'] = True
return result
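# Illustrative example (member strings are hypothetical): a policy whose
# bindings map 'roles/owner' to ['user:alice@example.com'] and 'roles/viewer'
# to ['allUsers'] converts to
# {'owners': ['user:alice@example.com'], 'readers': [], 'writers': [],
#  'all_users_can_read': True}, since 'allUsers' is folded into the flag.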
def convert_acl_to_iam_policy(acl):
"""Converts the legacy ACL format to an IAM Policy proto."""
owners = acl.get('owners', [])
readers = acl.get('readers', [])
if acl.get('all_users_can_read', False):
readers.append('allUsers')
writers = acl.get('writers', [])
bindings = []
if owners:
bindings.append({'role': 'roles/owner', 'members': owners})
if readers:
bindings.append({'role': 'roles/viewer', 'members': readers})
if writers:
bindings.append({'role': 'roles/editor', 'members': writers})
return {'bindings': bindings}
def convert_to_grid_dimensions(dimensions):
"""Converts an input value to GridDimensions.
Args:
dimensions: May specify a single number to indicate a square shape,
or a tuple of two dimensions to indicate (width,height).
Returns:
A GridDimensions as a dict.
"""
if isinstance(dimensions, six.integer_types):
return {'width': dimensions, 'height': dimensions}
elif len(dimensions) == 1:
return {'width': dimensions[0], 'height': dimensions[0]}
else:
return {'width': dimensions[0], 'height': dimensions[1]}
| apache-2.0 | -7,180,240,639,461,749,000 | 32.328358 | 80 | 0.652 | false |
victorfsf/RecRecife | recmap/admin.py | 1 | 2190 | # -*- encoding: utf-8 -*-
from django.contrib import admin
from recmap.models import Endereco, Horario, Coleta, Setor, ColetaHorario, Feedback
class EnderecoAdmin(admin.ModelAdmin):
fieldsets = (
(u'Nome da Rua', {'fields': ('nome_bruto', 'nome_min', 'nome')}),
(u'Bairro / Geolocalização', {'fields': ('bairro', 'latitude', 'longitude')}),
)
list_display = ('nome', 'bairro', 'latitude', 'longitude', 'nome_bruto')
search_fields = ('nome', 'bairro', 'latitude', 'longitude', 'nome_bruto', 'nome_min')
class HorarioAdmin(admin.ModelAdmin):
fieldsets = (
(u'Horário', {'fields': ('intervalo', 'turno')}),
)
list_display = ('intervalo', 'turno',)
search_fields = ('intervalo', 'turno',)
class ColetaAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações da coleta', {'fields': ('endereco', 'setor', 'rota')}),
)
list_display = ('endereco', 'setor', 'rota',)
search_fields = ('endereco__nome', 'endereco__bairro', 'setor__nome_setor', 'setor__frequencia', 'rota',)
class ColetaHorarioAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações', {'fields': ('coleta', 'horario',)}),
)
list_display = ('coleta', 'horario',)
search_fields = ('coleta__endereco__nome', 'coleta__endereco__bairro', 'horario__turno', 'horario__intervalo')
class SetorAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações', {'fields': ('nome_setor', 'frequencia',)}),
)
list_display = ('nome_setor', 'frequencia',)
search_fields = ('nome_setor', 'frequencia',)
class FeedbackAdmin(admin.ModelAdmin):
fieldsets = (
(u'Informações', {'fields': ('enviado_por', 'email', 'situacao', 'descricao','endereco', )}),
)
list_display = ('endereco', 'enviado_por', 'email', 'situacao', 'descricao',)
search_fields = ('endereco__nome', 'nome', 'email', 'situacao', 'descricao',)
admin.site.register(Endereco, EnderecoAdmin)
admin.site.register(Horario, HorarioAdmin)
admin.site.register(Coleta, ColetaAdmin)
admin.site.register(Setor, SetorAdmin)
admin.site.register(ColetaHorario, ColetaHorarioAdmin)
admin.site.register(Feedback, FeedbackAdmin) | gpl-2.0 | -719,037,859,188,247,000 | 28.863014 | 114 | 0.635613 | false |
blitzmann/Pyfa | gui/builtinAdditionPanes/droneView.py | 1 | 8775 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
import gui.globalEvents as GE
import gui.mainFrame
from gui.builtinMarketBrowser.events import ItemSelected, ITEM_SELECTED
from gui.display import Display
from gui.builtinViewColumns.state import State
from gui.contextMenu import ContextMenu
from gui.utils.staticHelpers import DragDropHelper
from service.fit import Fit
from service.market import Market
import gui.fitCommands as cmd
class DroneViewDrop(wx.DropTarget):
def __init__(self, dropFn, *args, **kwargs):
super(DroneViewDrop, self).__init__(*args, **kwargs)
self.dropFn = dropFn
# this is really transferring an EVE itemID
self.dropData = wx.TextDataObject()
self.SetDataObject(self.dropData)
def OnData(self, x, y, t):
if self.GetData():
dragged_data = DragDropHelper.data
data = dragged_data.split(':')
self.dropFn(x, y, data)
return t
class DroneView(Display):
DEFAULT_COLS = [
"State",
# "Base Icon",
"Base Name",
# "prop:droneDps,droneBandwidth",
"Max Range",
"Miscellanea",
"attr:maxVelocity",
"Price",
]
def __init__(self, parent):
Display.__init__(self, parent, style=wx.LC_SINGLE_SEL | wx.BORDER_NONE)
self.lastFitId = None
self.hoveredRow = None
self.hoveredColumn = None
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.mainFrame.Bind(GE.FIT_CHANGED, self.fitChanged)
self.mainFrame.Bind(ITEM_SELECTED, self.addItem)
self.Bind(wx.EVT_LEFT_DCLICK, self.removeItem)
self.Bind(wx.EVT_LEFT_DOWN, self.click)
self.Bind(wx.EVT_KEY_UP, self.kbEvent)
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
self.Bind(wx.EVT_CONTEXT_MENU, self.spawnMenu)
self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.startDrag)
self.SetDropTarget(DroneViewDrop(self.handleDragDrop))
def OnLeaveWindow(self, event):
self.SetToolTip(None)
self.hoveredRow = None
self.hoveredColumn = None
event.Skip()
def OnMouseMove(self, event):
row, _, col = self.HitTestSubItem(event.Position)
if row != self.hoveredRow or col != self.hoveredColumn:
if self.ToolTip is not None:
self.SetToolTip(None)
else:
self.hoveredRow = row
self.hoveredColumn = col
if row != -1 and col != -1 and col < len(self.DEFAULT_COLS):
mod = self.drones[self.GetItemData(row)]
if self.DEFAULT_COLS[col] == "Miscellanea":
tooltip = self.activeColumns[col].getToolTip(mod)
if tooltip is not None:
self.SetToolTip(tooltip)
else:
self.SetToolTip(None)
else:
self.SetToolTip(None)
else:
self.SetToolTip(None)
event.Skip()
def kbEvent(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_DELETE or keycode == wx.WXK_NUMPAD_DELETE:
row = self.GetFirstSelected()
if row != -1:
drone = self.drones[self.GetItemData(row)]
self.removeDrone(drone)
event.Skip()
def startDrag(self, event):
row = event.GetIndex()
if row != -1:
data = wx.TextDataObject()
dataStr = "drone:" + str(row)
data.SetText(dataStr)
dropSource = wx.DropSource(self)
dropSource.SetData(data)
DragDropHelper.data = dataStr
dropSource.DoDragDrop()
def handleDragDrop(self, x, y, data):
"""
        Handles dragging of items from the various pyfa displays which support it.
        data is a list with two indices:
        data[0] is the hard-coded str identifying the originating source
        data[1] is the typeID or index of the data we want to manipulate
"""
if data[0] == "drone": # we want to merge drones
pass
# remove merge functionality, if people complain in the next while, can add it back
# srcRow = int(data[1])
# dstRow, _ = self.HitTest((x, y))
# if srcRow != -1 and dstRow != -1:
# self._merge(srcRow, dstRow)
elif data[0] == "market":
wx.PostEvent(self.mainFrame, ItemSelected(itemID=int(data[1])))
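    # Illustrative drag payloads (values hypothetical): data == ['market', '1405']
    # posts an ItemSelected event for that typeID, while data == ['drone', '2']
    # identifies row 2 of this view (the merge branch above is currently a no-op).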
def _merge(self, src, dst):
sFit = Fit.getInstance()
fitID = self.mainFrame.getActiveFit()
if sFit.mergeDrones(fitID, self.drones[src], self.drones[dst]):
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
DRONE_ORDER = ('Light Scout Drones', 'Medium Scout Drones',
'Heavy Attack Drones', 'Sentry Drones', 'Combat Utility Drones',
'Electronic Warfare Drones', 'Logistic Drones', 'Mining Drones', 'Salvage Drones')
def droneKey(self, drone):
sMkt = Market.getInstance()
groupName = sMkt.getMarketGroupByItem(drone.item).name
return (self.DRONE_ORDER.index(groupName),
drone.item.name)
def fitChanged(self, event):
sFit = Fit.getInstance()
fit = sFit.getFit(event.fitID)
self.Parent.Parent.DisablePage(self, not fit or fit.isStructure)
# Clear list and get out if current fitId is None
if event.fitID is None and self.lastFitId is not None:
self.DeleteAllItems()
self.lastFitId = None
event.Skip()
return
self.original = fit.drones if fit is not None else None
self.drones = stuff = fit.drones[:] if fit is not None else None
if stuff is not None:
stuff.sort(key=self.droneKey)
if event.fitID != self.lastFitId:
self.lastFitId = event.fitID
item = self.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_DONTCARE)
if item != -1:
self.EnsureVisible(item)
self.deselectItems()
self.update(stuff)
event.Skip()
def addItem(self, event):
sFit = Fit.getInstance()
fitID = self.mainFrame.getActiveFit()
fit = sFit.getFit(fitID)
if not fit or fit.isStructure:
event.Skip()
return
if self.mainFrame.command.Submit(cmd.GuiAddDroneCommand(fitID, event.itemID)):
self.mainFrame.additionsPane.select("Drones")
event.Skip()
def removeItem(self, event):
row, _ = self.HitTest(event.Position)
if row != -1:
col = self.getColumn(event.Position)
if col != self.getColIndex(State):
drone = self.drones[self.GetItemData(row)]
self.removeDrone(drone)
def removeDrone(self, drone):
fitID = self.mainFrame.getActiveFit()
self.mainFrame.command.Submit(cmd.GuiRemoveDroneCommand(fitID, self.original.index(drone)))
def click(self, event):
event.Skip()
row, _ = self.HitTest(event.Position)
if row != -1:
col = self.getColumn(event.Position)
if col == self.getColIndex(State):
fitID = self.mainFrame.getActiveFit()
drone = self.drones[row]
self.mainFrame.command.Submit(cmd.GuiToggleDroneCommand(fitID, self.original.index(drone)))
def spawnMenu(self, event):
sel = self.GetFirstSelected()
if sel != -1:
drone = self.drones[sel]
sMkt = Market.getInstance()
sourceContext = "droneItem"
itemContext = sMkt.getCategoryByItem(drone.item).name
menu = ContextMenu.getMenu((drone,), (sourceContext, itemContext))
self.PopupMenu(menu)
| gpl-3.0 | 3,243,596,477,556,555,000 | 33.960159 | 107 | 0.587692 | false |
codelv/enaml-native | src/enamlnative/android/android_toast.py | 1 | 5004 | """
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Sept 18, 2017
@author: jrm
"""
from atom.api import Typed, Bool, set_default
from .bridge import JavaBridgeObject, JavaMethod, JavaStaticMethod
from enamlnative.widgets.toast import ProxyToast
from .android_toolkit_object import AndroidToolkitObject
class Toast(JavaBridgeObject):
#: Show the view for the specified duration.
__nativeclass__ = set_default('android.widget.Toast')
__signature__ = set_default(('android.content.Context',))
makeText = JavaStaticMethod('android.content.Context',
'java.lang.CharSequence', 'int',
returns='android.widget.Toast')
show = JavaMethod()
cancel = JavaMethod()
setDuration = JavaMethod('int')
setGravity = JavaMethod('int', 'int', 'int')
setText = JavaMethod('java.lang.CharSequence')
setView = JavaMethod('android.view.View')
class AndroidToast(AndroidToolkitObject, ProxyToast):
""" An Android implementation of an Enaml ProxyToast.
"""
#: A reference to the widget created by the proxy.
toast = Typed(Toast)
#: Made toast
#: Android doesn't let us simply update the text of an existing toast
#: unless it was created with "makeToast"
made_toast = Bool()
# -------------------------------------------------------------------------
# Initialization API
# -------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying widget.
        A toast is not a subclass of View, so we don't store it under the
        `widget` attribute, or children would try to use it as their parent
        (which crashes).
"""
d = self.declaration
if d.text:
Toast.makeText(self.get_context(),
d.text, 1).then(self.on_make_toast)
self.made_toast = True
else:
self.toast = Toast(self.get_context())
def init_widget(self):
""" Our widget may not exist yet so we have to diverge from the normal
way of doing initialization. See `update_widget`
"""
if not self.toast:
return
super(AndroidToast, self).init_widget()
d = self.declaration
if not self.made_toast:
#: Set it to LONG
self.toast.setDuration(1)
if d.gravity:
self.set_gravity(d.gravity)
if d.show:
self.set_show(d.show)
def init_layout(self):
""" If a view is given show it """
super(AndroidToast, self).init_layout()
if not self.made_toast:
for view in self.child_widgets():
self.toast.setView(view)
break
def child_added(self, child):
""" Overwrite the view """
view = child.widget
if view is not None:
self.toast.setView(view)
def on_make_toast(self, ref):
""" Using Toast.makeToast returns async so we have to initialize it
later.
"""
d = self.declaration
self.toast = Toast(__id__=ref)
self.init_widget()
def _refresh_show(self, dt):
""" While the toast.show is true, keep calling .show() until the
duration `dt` expires.
Parameters
------------
dt: int
Time left to keep showing
"""
d = self.declaration
if dt <= 0:
#: Done, hide
d.show = False
elif d.show:
#: If user didn't cancel it, keep it alive
self.toast.show()
t = min(1000, dt)
app = self.get_context()
app.timed_call(t, self._refresh_show, dt-t)
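    # Illustrative timing (assumed duration of 3500 ms): set_show() shows the
    # toast immediately and schedules _refresh_show(2500); the toast is then
    # re-shown at roughly 1000, 2000 and 3000 ms, and d.show is cleared once
    # the remaining time reaches zero at 3500 ms.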
# -------------------------------------------------------------------------
# ProxyToast API
# -------------------------------------------------------------------------
def set_text(self, text):
#: Only possible if a custom view is not used
if self.made_toast:
self.toast.setText(text)
def set_duration(self, duration):
""" Android for whatever stupid reason doesn't let you set the time
it only allows 1-long or 0-short. So we have to repeatedly call show
until the duration expires, hence this method does nothing see
`set_show`.
"""
pass
def set_show(self, show):
if show:
d = self.declaration
self.toast.show()
#: Get app
app = self.get_context()
t = min(1000, d.duration)
app.timed_call(t, self._refresh_show, d.duration-t)
else:
self.toast.cancel()
def set_layout(self, layout):
pass
def set_gravity(self, gravity):
d = self.declaration
self.toast.setGravity(gravity, int(d.x), int(d.y)) | mit | -5,463,170,937,494,007,000 | 30.28125 | 79 | 0.540568 | false |
ImmaculateObsession/omnomer | src/omnomer/omnomer/wsgi.py | 1 | 1136 | """
WSGI config for omnomer project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "omnomer.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 6,142,718,725,068,128,000 | 39.571429 | 79 | 0.800176 | false |
demonchild2112/travis-test | grr/server/grr_response_server/databases/mem_events.py | 1 | 2164 | #!/usr/bin/env python
"""The in memory database methods for event handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
class InMemoryDBEventMixin(object):
"""InMemoryDB mixin for event handling."""
@utils.Synchronized
def ReadAPIAuditEntries(self,
username=None,
router_method_names=None,
min_timestamp=None,
max_timestamp=None):
"""Returns audit entries stored in the database."""
results = []
for entry in self.api_audit_entries:
if username is not None and entry.username != username:
continue
if (router_method_names and
entry.router_method_name not in router_method_names):
continue
if min_timestamp is not None and entry.timestamp < min_timestamp:
continue
if max_timestamp is not None and entry.timestamp > max_timestamp:
continue
results.append(entry)
return sorted(results, key=lambda entry: entry.timestamp)
@utils.Synchronized
def CountAPIAuditEntriesByUserAndDay(self,
min_timestamp=None,
max_timestamp=None):
"""Returns audit entry counts grouped by user and calendar day."""
results = collections.Counter()
for entry in self.api_audit_entries:
if min_timestamp is not None and entry.timestamp < min_timestamp:
continue
if max_timestamp is not None and entry.timestamp > max_timestamp:
continue
# Truncate DateTime by removing the time-part to allow grouping by date.
day = rdfvalue.RDFDatetime.FromDate(entry.timestamp.AsDatetime().date())
results[(entry.username, day)] += 1
return dict(results)
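  # Illustrative return value (usernames hypothetical): two entries by 'alice'
  # on 2019-02-01 and one by 'bob' on 2019-02-02 yield
  # {('alice', <RDFDatetime 2019-02-01>): 2, ('bob', <RDFDatetime 2019-02-02>): 1}.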
@utils.Synchronized
def WriteAPIAuditEntry(self, entry):
"""Writes an audit entry to the database."""
copy = entry.Copy()
copy.timestamp = rdfvalue.RDFDatetime.Now()
self.api_audit_entries.append(copy)
| apache-2.0 | -6,057,583,941,528,205,000 | 31.298507 | 78 | 0.649261 | false |
thinkopensolutions/server-tools | users_ldap_populate/models/users_ldap.py | 1 | 2682 | # -*- coding: utf-8 -*-
# © 2012 Therp BV (<http://therp.nl>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/gpl.html).
import re
from odoo import models, api, _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
try:
from ldap.filter import filter_format
except ImportError:
_logger.debug('Can not `from ldap.filter import filter_format`.')
class CompanyLDAP(models.Model):
_inherit = 'res.company.ldap'
@api.multi
def action_populate(self):
"""
Prepopulate the user table from one or more LDAP resources.
Obviously, the option to create users must be toggled in
the LDAP configuration.
Return the number of users created (as far as we can tell).
"""
users_pool = self.env['res.users']
users_no_before = users_pool.search_count([])
logger = logging.getLogger('orm.ldap')
logger.debug("action_populate called on res.company.ldap ids %s",
self.ids)
for conf in self.get_ldap_dicts():
if not conf['create_user']:
continue
attribute_match = re.search(
r'([a-zA-Z_]+)=\%s', conf['ldap_filter'])
if attribute_match:
login_attr = attribute_match.group(1)
else:
raise UserError(
_("No login attribute found: "
"Could not extract login attribute from filter %s") %
conf['ldap_filter'])
ldap_filter = filter_format(conf['ldap_filter'] % '*', ())
for result in self.query(conf, ldap_filter.encode('utf-8')):
self.get_or_create_user(conf, result[1][login_attr][0], result)
users_no_after = users_pool.search_count([])
users_created = users_no_after - users_no_before
logger.debug("%d users created", users_created)
return users_created
@api.multi
def populate_wizard(self):
"""
GUI wrapper for the populate method that reports back
the number of users created.
"""
if not self:
return
wizard_obj = self.env['res.company.ldap.populate_wizard']
res_id = wizard_obj.create({'ldap_id': self.id}).id
return {
'name': wizard_obj._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': wizard_obj._name,
'domain': [],
'context': self.env.context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': res_id,
'nodestroy': True,
}
| agpl-3.0 | -8,825,716,694,864,436,000 | 32.098765 | 79 | 0.556509 | false |
kimus/django-blocks | blocks/migrations/0006_auto__chg_field_menu_slug.py | 1 | 6647 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Menu.slug'
db.alter_column(u'blocks_menu', 'slug', self.gf('blocks.fields.SlugURLField')(max_length=200, null=True))
def backwards(self, orm):
# Changing field 'Menu.slug'
db.alter_column(u'blocks_menu', 'slug', self.gf('blocks.fields.SlugURLField')(default='', max_length=200))
models = {
u'blocks.menu': {
'Meta': {'object_name': 'Menu'},
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('blocks.fields.OrderField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['blocks.Menu']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'db_index': 'True', 'symmetrical': 'False'}),
'slug': ('blocks.fields.SlugURLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
},
u'blocks.menutranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'MenuTranslation', 'db_table': "u'blocks_menu_translation'"},
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['blocks.Menu']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'blocks.page': {
'Meta': {'ordering': "['url', 'order']", 'object_name': 'Page'},
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_relative': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'menu': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('blocks.fields.OrderField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'db_index': 'True', 'symmetrical': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
u'blocks.pagetranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'PageTranslation', 'db_table': "u'blocks_page_translation'"},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['blocks.Page']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'blocks.promotable': {
'Meta': {'object_name': 'Promotable'},
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'promoted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'db_index': 'True', 'symmetrical': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'blocks.template': {
'Meta': {'object_name': 'Template'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['blocks'] | mit | 4,874,328,548,890,864,000 | 70.483871 | 154 | 0.546863 | false |
spirali/nukecon | src/base/structure.py | 1 | 10222 | import logging
import os.path
from base import paths
from base import utils
import xml.etree.ElementTree as xml
import itertools
import copy
GAMMA_LIMITS = [ 30, 90, 150, 210, 270, 330, 9999 ]
GAMMA_NAMES = [ "sp", "+sc", "+ac", "ap", "-ac", "-sc", "sp" ]
DIRECTION_LIMITS = [ 45, 135, 225, 315 ]
DIRECTION_NAMES = [ "North", "East", "South", "West" ]
class Result:
def __init__(self):
self.gamma = None
self.p = None
self.tm = None
self.synanti = None
self.mixed_results = 1
@property
def dir_index(self):
for i, limit in enumerate(DIRECTION_LIMITS):
if self.p < limit:
return i
return 0
@property
def gamma_index(self):
for i, limit in enumerate(GAMMA_LIMITS):
if self.gamma < limit:
return i
else:
raise Exception("Invalid value")
@property
def dir_name(self):
return DIRECTION_NAMES[self.dir_index]
@property
def gamma_name(self):
return GAMMA_NAMES[self.gamma_index]
def to_element(self):
e = xml.Element("result")
e.set("gamma", str(self.gamma))
e.set("p", str(self.p))
e.set("tm", str(self.tm))
e.set("synanti", str(self.synanti))
return e
@classmethod
def from_element(cls, e):
result = cls()
result.gamma = float(e.get("gamma"))
result.p = float(e.get("p"))
result.tm = float(e.get("tm"))
result.synanti = float(e.get("synanti"))
return result
class Chain:
def __init__(self, id):
self.id = id
self.ec_numbers = []
self.compound = None
self.results = []
def add_result(self, result):
self.results.append(result)
@property
def ec_numbers_str(self):
return ", ".join(self.ec_numbers)
def to_element(self):
e = xml.Element("chain")
e.set("id", self.id)
e.set("compound", self.compound)
for ec_no in self.ec_numbers:
e2 = xml.Element("ec-number")
e2.text = str(ec_no)
e.append(e2)
for result in self.results:
e.append(result.to_element())
return e
@classmethod
def from_element(cls, element):
chain = cls(element.get("id"))
chain.ec_numbers = [ e.text for e in element.findall("ec-number") ]
chain.compound = element.get("compound")
chain.results = [ Result.from_element(e) for e in element.findall("result") ]
return chain
def avg_results(results):
r = Result()
l = len(results)
r.mixed_results = l
r.gamma = (sum(s.gamma for s in results) % 360.0) / l
r.tm = (sum(s.tm for s in results) % 360.0) / l
r.p = (sum(s.p for s in results) % 360.0) / l
return r
def angle_diff(a, b):
d = abs(a - b)
if d > 180.0:
return d - 180.0
else:
return d
def join_chains(chains, angle_limit):
def key(v):
return v[1].p
results = []
for c in chains:
results.extend((c, r) for r in c.results)
if not results:
return results
results.sort(key=key)
for n in xrange(1, len(results) + 1):
best_angle = 360.0
best_partition = None
for partition in utils.make_partitions(results, n):
angle = 0
for s in partition:
a = sum(angle_diff(s[i-1][1].p, s[i][1].p) for i in xrange(1, len(s)))
if a > angle:
angle = a
if angle < best_angle:
best_angle = angle
best_partition = partition
if best_angle <= angle_limit:
break
result = []
for s in best_partition:
chains = list(set(c for c, r in s))
chains.sort(key=lambda c: c.id)
chain = Chain(",".join(c.id for c in chains))
chain.results = [ avg_results([r for c, r, in s]) ]
chain.ec_numbers = chains[0].ec_numbers
chain.compound = chains[0].compound
result.append(chain)
return result
class Structure:
def __init__(self, id):
self.id = id
self.downloaded = False
self.resolution = None
self.exp_technique = None
self.title = None
self.chains = []
@property
def filename(self):
return os.path.join(paths.DATA,
self.id[:2].lower(),
"pdb{0}.ent".format(self.id.lower()))
def get_chain(self, id):
for chain in self.chains:
if chain.id == id:
return chain
def join_chains(self, angle_limit):
s = copy.copy(self)
if self.chains:
s.chains = join_chains(self.chains, angle_limit)
return s
def to_element(self):
e = xml.Element("structure")
e.set("id", str(self.id))
if self.resolution is not None:
e.set("resolution", str(self.resolution))
e.set("exp-technique", self.exp_technique)
e.set("title", self.title)
for chain in self.chains:
e.append(chain.to_element())
return e
def fill_download_info(self):
self.downloaded = os.path.isfile(self.filename)
def strip_empty_chains(self):
s = copy.copy(self)
s.chains = [ chain for chain in self.chains if chain.results ]
return s
@classmethod
def from_datarow(cls, row):
id, chains = row
id, chain_id, title, compound, resolution, exp_technique, ec_no \
= chains[0]
s = cls(id)
try:
s.resolution = float(resolution)
except ValueError:
s.resolution = None
s.exp_technique = exp_technique
s.title = title
for c in chains:
id, chain_id, t, c, resolution, exp_technique, ec_no = c
assert t == title
chain = Chain(chain_id)
chain.compound = c
if ec_no:
chain.ec_numbers = ec_no.split("#")
s.chains.append(chain)
return s
@classmethod
def from_element(cls, element):
s = cls(element.get("id"))
resolution = element.get("resolution", None)
if resolution is not None:
s.resolution = float(resolution)
s.exp_technique = element.get("exp-technique")
s.title = element.get("title", None)
s.chains = [ Chain.from_element(e) for e in element.findall("chain") ]
return s
class StructureList:
def __init__(self, datarows=None, xmlfile=None, structures=None):
if structures is None:
structures = []
self.structures = structures
if datarows is not None:
for row in datarows:
self.structures.append(Structure.from_datarow(row))
if xmlfile is not None:
try:
tree = xml.parse(xmlfile)
except Exception:
logging.debug("File with structures not found")
return
for e in tree.getroot():
self.structures.append(Structure.from_element(e))
def get_missing(self, slist):
my = set(s.id for s in self.structures)
other = set(s.id for s in slist.structures)
diff = other - my
result = []
for s in slist.structures:
if s.id in diff:
result.append(s)
return StructureList(structures=result)
def add(self, slist):
self.structures.extend(slist.structures)
def save(self, filename):
root = xml.Element("structures")
for s in self.structures:
root.append(s.to_element())
tree = xml.ElementTree(root)
tree.write(filename)
def get_ids(self):
return [ s.id for s in self.structures]
def compare(self, other):
my_ids = frozenset(self.get_ids())
other_ids = frozenset(other.get_ids())
return len(my_ids - other_ids), len(other_ids - my_ids)
def make_resolution_stats(self):
resolution_stats = [ 0, 0, 0, 0, 0 ]
for s in self.structures:
if s.resolution is None:
resolution_stats[0] += 1
elif s.resolution <= 1.0:
resolution_stats[1] += 1
elif s.resolution <= 2.0:
resolution_stats[2] += 1
elif s.resolution <= 3.0:
resolution_stats[3] += 1
else:
resolution_stats[4] += 1
return resolution_stats
def filter(self, max_resolution=None):
structures = self.structures
if max_resolution is not None:
structures = (s for s in structures
if s.resolution and
s.resolution <= max_resolution)
return StructureList(structures=list(structures))
def filter_downloaded(self):
structures = [ s for s in self.structures if s.downloaded ]
return StructureList(structures=structures)
def filter_not_downloaded(self):
structures = [ s for s in self.structures if not s.downloaded ]
return StructureList(structures=structures)
def fill_download_info(self):
for s in self.structures:
s.fill_download_info()
def filter_with_results(self):
structures = [ s for s in self.structures
if any(c.results for c in s.chains) ]
return StructureList(structures=structures)
def join_chains(self, angle_limit):
structures = [ s.join_chains(angle_limit) for s in self.structures ]
return StructureList(structures=structures)
def strip_empty_chains(self):
return StructureList(
structures=[ s.strip_empty_chains() for s in self.structures ])
@property
def chains(self):
return itertools.chain.from_iterable(s.chains for s in self.structures)
@property
def results(self):
return itertools.chain.from_iterable(c.results for c in self.chains)
def make_table(self):
return []
def __iter__(self):
return iter(self.structures)
def __len__(self):
return len(self.structures)
| bsd-3-clause | 5,950,072,718,312,121,000 | 27.794366 | 86 | 0.553414 | false |
rahulunair/nova | nova/conductor/tasks/live_migrate.py | 1 | 27649 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
import six
from nova import availability_zones
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.conductor.tasks import base
from nova.conductor.tasks import migrate
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova import objects
from nova.objects import fields as obj_fields
from nova.objects import migrate_data as migrate_data_obj
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def supports_vif_related_pci_allocations(context, host):
"""Checks if the compute host service is new enough to support
VIF related PCI allocation during live migration
:param context: The user request context.
:param host: The nova-compute host to check.
:returns: True if the compute host is new enough to support vif related
PCI allocations
"""
svc = objects.Service.get_by_host_and_binary(context, host, 'nova-compute')
return svc.version >= 36
class LiveMigrationTask(base.TaskBase):
def __init__(self, context, instance, destination,
block_migration, disk_over_commit, migration, compute_rpcapi,
servicegroup_api, query_client, report_client,
request_spec=None):
super(LiveMigrationTask, self).__init__(context, instance)
self.destination = destination
self.block_migration = block_migration
self.disk_over_commit = disk_over_commit
self.migration = migration
self.source = instance.host
self.migrate_data = None
self.limits = None
self.compute_rpcapi = compute_rpcapi
self.servicegroup_api = servicegroup_api
self.query_client = query_client
self.report_client = report_client
self.request_spec = request_spec
self._source_cn = None
self._held_allocations = None
self.network_api = neutron.API()
def _execute(self):
self._check_instance_is_active()
self._check_instance_has_no_numa()
self._check_host_is_up(self.source)
self._source_cn, self._held_allocations = (
# NOTE(danms): This may raise various exceptions, which will
# propagate to the API and cause a 500. This is what we
# want, as it would indicate internal data structure corruption
# (such as missing migrations, compute nodes, etc).
migrate.replace_allocation_with_migration(self.context,
self.instance,
self.migration))
if not self.destination:
# Either no host was specified in the API request and the user
# wants the scheduler to pick a destination host, or a host was
# specified but is not forcing it, so they want the scheduler
# filters to run on the specified host, like a scheduler hint.
self.destination, dest_node, self.limits = self._find_destination()
else:
# This is the case that the user specified the 'force' flag when
# live migrating with a specific destination host so the scheduler
# is bypassed. There are still some minimal checks performed here
# though.
source_node, dest_node = self._check_requested_destination()
# Now that we're semi-confident in the force specified host, we
# need to copy the source compute node allocations in Placement
# to the destination compute node. Normally select_destinations()
# in the scheduler would do this for us, but when forcing the
# target host we don't call the scheduler.
# TODO(mriedem): Call select_destinations() with a
# skip_filters=True flag so the scheduler does the work of claiming
# resources on the destination in Placement but still bypass the
# scheduler filters, which honors the 'force' flag in the API.
# This raises NoValidHost which will be handled in
# ComputeTaskManager.
# NOTE(gibi): consumer_generation = None as we expect that the
# source host allocation is held by the migration therefore the
# instance is a new, empty consumer for the dest allocation. If
# this assumption fails then placement will return consumer
# generation conflict and this call raise a AllocationUpdateFailed
# exception. We let that propagate here to abort the migration.
scheduler_utils.claim_resources_on_destination(
self.context, self.report_client,
self.instance, source_node, dest_node,
source_allocations=self._held_allocations,
consumer_generation=None)
# dest_node is a ComputeNode object, so we need to get the actual
# node name off it to set in the Migration object below.
dest_node = dest_node.hypervisor_hostname
self.instance.availability_zone = (
availability_zones.get_host_availability_zone(
self.context, self.destination))
self.migration.source_node = self.instance.node
self.migration.dest_node = dest_node
self.migration.dest_compute = self.destination
self.migration.save()
# TODO(johngarbutt) need to move complexity out of compute manager
# TODO(johngarbutt) disk_over_commit?
return self.compute_rpcapi.live_migration(self.context,
host=self.source,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migration=self.migration,
migrate_data=self.migrate_data)
def rollback(self, ex):
# TODO(johngarbutt) need to implement the clean up operation
# but this will make sense only once we pull in the compute
# calls, since this class currently makes no state changes,
# except to call the compute method, that has no matching
# rollback call right now.
if self._held_allocations:
migrate.revert_allocation_for_migration(self.context,
self._source_cn,
self.instance,
self.migration)
def _check_instance_is_active(self):
if self.instance.power_state not in (power_state.RUNNING,
power_state.PAUSED):
raise exception.InstanceInvalidState(
instance_uuid=self.instance.uuid,
attr='power_state',
state=power_state.STATE_MAP[self.instance.power_state],
method='live migrate')
def _check_instance_has_no_numa(self):
"""Prevent live migrations of instances with NUMA topologies.
TODO(artom) Remove this check in compute RPC 6.0.
"""
if not self.instance.numa_topology:
return
# Only KVM (libvirt) supports NUMA topologies with CPU pinning;
# HyperV's vNUMA feature doesn't allow specific pinning
hypervisor_type = objects.ComputeNode.get_by_host_and_nodename(
self.context, self.source, self.instance.node).hypervisor_type
# KVM is not a hypervisor, so when using a virt_type of "kvm" the
# hypervisor_type will still be "QEMU".
if hypervisor_type.lower() != obj_fields.HVType.QEMU:
return
# We're fully upgraded to a version that supports NUMA live
# migration, carry on.
if objects.Service.get_minimum_version(
self.context, 'nova-compute') >= 40:
return
if CONF.workarounds.enable_numa_live_migration:
LOG.warning(
'Instance has an associated NUMA topology, cell contains '
'compute nodes older than train, but the '
'enable_numa_live_migration workaround is enabled. Live '
'migration will not be NUMA-aware. The instance NUMA '
'topology, including related attributes such as CPU pinning, '
'huge page and emulator thread pinning information, will not '
'be recalculated. See bug #1289064 for more information.',
instance=self.instance)
else:
raise exception.MigrationPreCheckError(
reason='Instance has an associated NUMA topology, cell '
'contains compute nodes older than train, and the '
'enable_numa_live_migration workaround is disabled. '
'Refusing to perform the live migration, as the '
'instance NUMA topology, including related attributes '
'such as CPU pinning, huge page and emulator thread '
'pinning information, cannot be recalculated. See '
'bug #1289064 for more information.')
def _check_can_migrate_pci(self, src_host, dest_host):
"""Checks that an instance can migrate with PCI requests.
        At the moment this is supported only if:
1. Instance contains VIF related PCI requests.
2. Neutron supports multiple port binding extension.
3. Src and Dest host support VIF related PCI allocations.
"""
if self.instance.pci_requests is None or not len(
self.instance.pci_requests.requests):
return
for pci_request in self.instance.pci_requests.requests:
if pci_request.source != objects.InstancePCIRequest.NEUTRON_PORT:
# allow only VIF related PCI requests in live migration.
raise exception.MigrationPreCheckError(
reason= "non-VIF related PCI requests for instance "
"are not allowed for live migration.")
# All PCI requests are VIF related, now check neutron,
# source and destination compute nodes.
if not self.network_api.supports_port_binding_extension(
self.context):
raise exception.MigrationPreCheckError(
reason="Cannot live migrate VIF with related PCI, Neutron "
"does not support required port binding extension.")
if not (supports_vif_related_pci_allocations(self.context,
src_host) and
supports_vif_related_pci_allocations(self.context,
dest_host)):
raise exception.MigrationPreCheckError(
reason="Cannot live migrate VIF with related PCI, "
"source and destination nodes do not support "
"the operation.")
def _check_host_is_up(self, host):
service = objects.Service.get_by_compute_host(self.context, host)
if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host)
def _check_requested_destination(self):
"""Performs basic pre-live migration checks for the forced host.
:returns: tuple of (source ComputeNode, destination ComputeNode)
"""
self._check_destination_is_not_source()
self._check_host_is_up(self.destination)
self._check_destination_has_enough_memory()
source_node, dest_node = self._check_compatible_with_source_hypervisor(
self.destination)
# NOTE(gibi): This code path is used when the live migration is forced
# to a target host and skipping the scheduler. Such operation is
# rejected for servers with nested resource allocations since
# I7cbd5d9fb875ebf72995362e0b6693492ce32051. So here we can safely
# assume that the provider mapping is empty.
self._call_livem_checks_on_host(self.destination, {})
# Make sure the forced destination host is in the same cell that the
# instance currently lives in.
# NOTE(mriedem): This can go away if/when the forced destination host
# case calls select_destinations.
source_cell_mapping = self._get_source_cell_mapping()
dest_cell_mapping = self._get_destination_cell_mapping()
if source_cell_mapping.uuid != dest_cell_mapping.uuid:
raise exception.MigrationPreCheckError(
reason=(_('Unable to force live migrate instance %s '
'across cells.') % self.instance.uuid))
return source_node, dest_node
def _check_destination_is_not_source(self):
if self.destination == self.source:
raise exception.UnableToMigrateToSelf(
instance_id=self.instance.uuid, host=self.destination)
def _check_destination_has_enough_memory(self):
compute = self._get_compute_info(self.destination)
free_ram_mb = compute.free_ram_mb
total_ram_mb = compute.memory_mb
mem_inst = self.instance.memory_mb
# NOTE(sbauza): Now the ComputeNode object reports an allocation ratio
# that can be provided by the compute_node if new or by the controller
ram_ratio = compute.ram_allocation_ratio
# NOTE(sbauza): Mimic the RAMFilter logic in order to have the same
# ram validation
avail = total_ram_mb * ram_ratio - (total_ram_mb - free_ram_mb)
if not mem_inst or avail <= mem_inst:
instance_uuid = self.instance.uuid
dest = self.destination
reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationPreCheckError(reason=reason % dict(
instance_uuid=instance_uuid, dest=dest, avail=avail,
mem_inst=mem_inst))
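    # Worked example of the memory check above (numbers are hypothetical):
    # with total_ram_mb=8192, free_ram_mb=2048 and ram_allocation_ratio=1.5,
    # avail = 8192 * 1.5 - (8192 - 2048) = 6144 MB, so an instance with
    # memory_mb below 6144 passes while 6144 or more raises
    # MigrationPreCheckError.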
def _get_compute_info(self, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
self.context, host)
def _check_compatible_with_source_hypervisor(self, destination):
source_info = self._get_compute_info(self.source)
destination_info = self._get_compute_info(destination)
source_type = source_info.hypervisor_type
destination_type = destination_info.hypervisor_type
if source_type != destination_type:
raise exception.InvalidHypervisorType()
source_version = source_info.hypervisor_version
destination_version = destination_info.hypervisor_version
if source_version > destination_version:
raise exception.DestinationHypervisorTooOld()
return source_info, destination_info
def _call_livem_checks_on_host(self, destination, provider_mapping):
self._check_can_migrate_pci(self.source, destination)
try:
self.migrate_data = self.compute_rpcapi.\
check_can_live_migrate_destination(self.context, self.instance,
destination, self.block_migration, self.disk_over_commit,
self.migration, self.limits)
except messaging.MessagingTimeout:
msg = _("Timeout while checking if we can live migrate to host: "
"%s") % destination
raise exception.MigrationPreCheckError(msg)
# Check to see that neutron supports the binding-extended API.
if self.network_api.supports_port_binding_extension(self.context):
if 'vifs' not in self.migrate_data:
# migrate data vifs were not constructed in dest compute
# during check_can_live_migrate_destination, construct a
# skeleton to be updated after port binding.
# TODO(adrianc): This can be removed once we move to U release
self.migrate_data.vifs = migrate_data_obj.VIFMigrateData.\
create_skeleton_migrate_vifs(
self.instance.get_network_info())
bindings = self._bind_ports_on_destination(
destination, provider_mapping)
self._update_migrate_vifs_from_bindings(self.migrate_data.vifs,
bindings)
@staticmethod
def _get_port_profile_from_provider_mapping(port_id, provider_mappings):
if port_id in provider_mappings:
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
# requests of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID
# here.
return {'allocation': provider_mappings[port_id][0]}
else:
return {}
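    # Sketch of the mapping consumed above (UUIDs are hypothetical): a port
    # with a resource request maps to the resource providers fulfilling its
    # request group, e.g.
    #
    #   provider_mappings = {'port-uuid-1': ['rp-uuid-a']}
    #
    # which yields the profile fragment {'allocation': 'rp-uuid-a'}; ports
    # without an entry yield {}.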
def _bind_ports_on_destination(self, destination, provider_mappings):
LOG.debug('Start binding ports on destination host: %s', destination,
instance=self.instance)
# Bind ports on the destination host; returns a dict, keyed by
# port ID, of a new destination host port binding dict per port
# that was bound. This information is then stuffed into the
# migrate_data.
try:
# NOTE(adrianc): migrate_data.vifs was partially filled
# by destination compute if compute is new enough.
# if that is the case, it may have updated the required port
# profile for the destination node (e.g new PCI address if SR-IOV)
# perform port binding against the requested profile
ports_profile = {}
for mig_vif in self.migrate_data.vifs:
profile = mig_vif.profile if 'profile_json' in mig_vif else {}
# NOTE(gibi): provider_mappings also contribute to the
# binding profile of the ports if the port has resource
# request. So we need to merge the profile information from
# both sources.
profile.update(
self._get_port_profile_from_provider_mapping(
mig_vif.port_id, provider_mappings))
if profile:
ports_profile[mig_vif.port_id] = profile
bindings = self.network_api.bind_ports_to_host(
context=self.context, instance=self.instance, host=destination,
vnic_types=None, port_profiles=ports_profile)
except exception.PortBindingFailed as e:
# Port binding failed for that host, try another one.
raise exception.MigrationPreCheckError(
reason=e.format_message())
return bindings
def _update_migrate_vifs_from_bindings(self, migrate_vifs, bindings):
for migrate_vif in migrate_vifs:
for attr_name, attr_val in bindings[migrate_vif.port_id].items():
setattr(migrate_vif, attr_name, attr_val)
def _get_source_cell_mapping(self):
"""Returns the CellMapping for the cell in which the instance lives
:returns: nova.objects.CellMapping record for the cell where
the instance currently lives.
:raises: MigrationPreCheckError - in case a mapping is not found
"""
try:
return objects.InstanceMapping.get_by_instance_uuid(
self.context, self.instance.uuid).cell_mapping
except exception.InstanceMappingNotFound:
raise exception.MigrationPreCheckError(
reason=(_('Unable to determine in which cell '
'instance %s lives.') % self.instance.uuid))
def _get_destination_cell_mapping(self):
"""Returns the CellMapping for the destination host
:returns: nova.objects.CellMapping record for the cell where
the destination host is mapped.
:raises: MigrationPreCheckError - in case a mapping is not found
"""
try:
return objects.HostMapping.get_by_host(
self.context, self.destination).cell_mapping
except exception.HostMappingNotFound:
raise exception.MigrationPreCheckError(
reason=(_('Unable to determine in which cell '
'destination host %s lives.') % self.destination))
def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
"""Builds a RequestSpec that can be passed to select_destinations
Used when calling the scheduler to pick a destination host for live
migrating the instance.
:param attempted_hosts: List of host names to ignore in the scheduler.
This is generally at least seeded with the source host.
:returns: nova.objects.RequestSpec object
"""
request_spec = self.request_spec
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
port_res_req = (
self.network_api.get_requested_resource_for_instance(
self.context, self.instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle
# similar non-nova resources then here we have to collect
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
scheduler_utils.setup_instance_group(self.context, request_spec)
# We currently only support live migrating to hosts in the same
# cell that the instance lives in, so we need to tell the scheduler
# to limit the applicable hosts based on cell.
cell_mapping = self._get_source_cell_mapping()
LOG.debug('Requesting cell %(cell)s while live migrating',
{'cell': cell_mapping.identity},
instance=self.instance)
if ('requested_destination' in request_spec and
request_spec.requested_destination):
request_spec.requested_destination.cell = cell_mapping
else:
request_spec.requested_destination = objects.Destination(
cell=cell_mapping)
request_spec.ensure_project_and_user_id(self.instance)
request_spec.ensure_network_metadata(self.instance)
compute_utils.heal_reqspec_is_bfv(
self.context, request_spec, self.instance)
return request_spec
def _find_destination(self):
# TODO(johngarbutt) this retry loop should be shared
attempted_hosts = [self.source]
request_spec = self._get_request_spec_for_select_destinations(
attempted_hosts)
host = None
while host is None:
self._check_not_over_max_retries(attempted_hosts)
request_spec.ignore_hosts = attempted_hosts
try:
selection_lists = self.query_client.select_destinations(
self.context, request_spec, [self.instance.uuid],
return_objects=True, return_alternates=False)
# We only need the first item in the first list, as there is
# only one instance, and we don't care about any alternates.
selection = selection_lists[0][0]
host = selection.service_host
except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There may be multiple schedulers, and the
                # scheduling algorithm is round-robin, so we could let another
                # scheduler try.
                # Note(ShaoHe Feng) There are several types of RemoteError,
                # such as NoSuchMethod and UnsupportedVersion; we can
                # distinguish them by ex.exc_type.
raise exception.MigrationSchedulerRPCError(
reason=six.text_type(ex))
scheduler_utils.fill_provider_mapping(request_spec, selection)
provider_mapping = request_spec.get_request_group_mapping()
if provider_mapping:
# NOTE(gibi): this call might update the pci_requests of the
# instance based on the destination host if so then such change
# will be persisted when post_live_migration_at_destination
# runs.
compute_utils.\
update_pci_request_spec_with_allocated_interface_name(
self.context, self.report_client, self.instance,
provider_mapping)
try:
self._check_compatible_with_source_hypervisor(host)
self._call_livem_checks_on_host(host, provider_mapping)
except (exception.Invalid, exception.MigrationPreCheckError) as e:
LOG.debug("Skipping host: %(host)s because: %(e)s",
{"host": host, "e": e})
attempted_hosts.append(host)
# The scheduler would have created allocations against the
# selected destination host in Placement, so we need to remove
# those before moving on.
self._remove_host_allocations(selection.compute_node_uuid)
host = None
# TODO(artom) We should probably just return the whole selection object
# at this point.
return (selection.service_host, selection.nodename, selection.limits)
def _remove_host_allocations(self, compute_node_uuid):
"""Removes instance allocations against the given node from Placement
:param compute_node_uuid: UUID of ComputeNode resource provider
"""
# Now remove the allocations for our instance against that node.
# Note that this does not remove allocations against any other node
# or shared resource provider, it's just undoing what the scheduler
# allocated for the given (destination) node.
self.report_client.remove_provider_tree_from_instance_allocation(
self.context, self.instance.uuid, compute_node_uuid)
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:
return
retries = len(attempted_hosts) - 1
if retries > CONF.migrate_max_retries:
if self.migration:
self.migration.status = 'failed'
self.migration.save()
msg = (_('Exceeded max scheduling retries %(max_retries)d for '
'instance %(instance_uuid)s during live migration')
% {'max_retries': retries,
'instance_uuid': self.instance.uuid})
raise exception.MaxRetriesExceeded(reason=msg)
| apache-2.0 | -4,334,092,726,895,962,600 | 48.110124 | 79 | 0.619914 | false |
samuelclay/NewsBlur | vendor/timezones/timezones_tests/tests.py | 1 | 6356 | import re
from datetime import datetime
import pytz
from django import forms
from django.conf import settings
from django.test import TestCase
import vendor.timezones.forms
import vendor.timezones.timezones_tests.models as test_models
from vendor.timezones.utilities import localtime_for_timezone, adjust_datetime_to_timezone
from vendor import timezones
class TimeZoneTestCase(TestCase):
def setUp(self):
# ensure UTC
self.ORIGINAL_TIME_ZONE = settings.TIME_ZONE
settings.TIME_ZONE = "UTC"
def tearDown(self):
settings.TIME_ZONE = self.ORIGINAL_TIME_ZONE
# little helpers
def assertFormIsValid(self, form):
is_valid = form.is_valid()
self.assertTrue(is_valid,
"Form did not validate (errors=%r, form=%r)" % (form._errors, form)
)
class UtilsTestCase(TimeZoneTestCase):
def test_localtime_for_timezone(self):
self.assertEqual(
localtime_for_timezone(
datetime(2008, 6, 25, 18, 0, 0), "America/Denver"
).strftime("%m/%d/%Y %H:%M:%S"),
"06/25/2008 12:00:00"
)
def test_adjust_datetime_to_timezone(self):
self.assertEqual(
adjust_datetime_to_timezone(
datetime(2008, 6, 25, 18, 0, 0), "UTC"
).strftime("%m/%d/%Y %H:%M:%S"),
"06/25/2008 18:00:00"
)
class TimeZoneFieldTestCase(TimeZoneTestCase):
def test_forms_clean_required(self):
f = timezones.forms.TimeZoneField()
self.assertEqual(
repr(f.clean("US/Eastern")),
"<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>"
)
self.assertRaises(forms.ValidationError, f.clean, "")
def test_forms_clean_not_required(self):
f = timezones.forms.TimeZoneField(required=False)
self.assertEqual(
repr(f.clean("US/Eastern")),
"<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>"
)
self.assertEqual(f.clean(""), "")
def test_forms_clean_bad_value(self):
f = timezones.forms.TimeZoneField()
        try:
            f.clean("BAD VALUE")
        except forms.ValidationError as e:
            self.assertEqual(e.messages, ["Select a valid choice. BAD VALUE is not one of the available choices."])
        else:
            self.fail("forms.ValidationError was not raised for a bad value")
def test_models_as_a_form(self):
class ProfileForm(forms.ModelForm):
class Meta:
model = test_models.Profile
fields = "__all__"
form = ProfileForm()
rendered = form.as_p()
self.assertTrue(
bool(re.search(r'<option value="[\w/]+">\([A-Z]+(?:\+|\-)\d{4}\)\s[\w/]+</option>', rendered)),
"Did not find pattern in rendered form"
)
def test_models_modelform_validation(self):
class ProfileForm(forms.ModelForm):
class Meta:
model = test_models.Profile
fields = "__all__"
now = datetime.now()
tz = "America/Denver"
tz = "(GMT%s) %s" % (now.strftime("%z"), tz)
form = ProfileForm({"name": "Brian Rosner", "timezone": tz})
self.assertFormIsValid(form)
def test_models_modelform_save(self):
class ProfileForm(forms.ModelForm):
class Meta:
model = test_models.Profile
fields = "__all__"
tz = "America/Denver"
now = datetime.now()
tz = "(GMT%s) %s" % (now.strftime("%z"), tz)
form = ProfileForm({"name": "Brian Rosner", "timezone": tz})
self.assertFormIsValid(form)
p = form.save()
def test_models_string_value(self):
p = test_models.Profile(name="Brian Rosner", timezone="America/Denver")
p.save()
p = test_models.Profile.objects.get(pk=p.pk)
self.assertEqual(p.timezone, pytz.timezone("America/Denver"))
def test_models_string_value_lookup(self):
test_models.Profile(name="Brian Rosner", timezone="America/Denver").save()
qs = test_models.Profile.objects.filter(timezone="America/Denver")
self.assertEqual(qs.count(), 1)
def test_models_tz_value(self):
tz = pytz.timezone("America/Denver")
p = test_models.Profile(name="Brian Rosner", timezone=tz)
p.save()
p = test_models.Profile.objects.get(pk=p.pk)
self.assertEqual(p.timezone, tz)
def test_models_tz_value_lookup(self):
test_models.Profile(name="Brian Rosner", timezone="America/Denver").save()
qs = test_models.Profile.objects.filter(timezone=pytz.timezone("America/Denver"))
self.assertEqual(qs.count(), 1)
class LocalizedDateTimeFieldTestCase(TimeZoneTestCase):
def test_forms_clean_required(self):
# the default case where no timezone is given explicitly. uses settings.TIME_ZONE.
f = timezones.forms.LocalizedDateTimeField()
self.assertEqual(
repr(f.clean("2008-05-30 14:30:00")),
"datetime.datetime(2008, 5, 30, 14, 30, tzinfo=<UTC>)"
)
self.assertRaises(forms.ValidationError, f.clean, "")
    def test_forms_clean_not_required(self):
        # the non-required case; no timezone is given explicitly, so settings.TIME_ZONE is used.
f = timezones.forms.LocalizedDateTimeField(required=False)
self.assertEqual(
repr(f.clean("2008-05-30 14:30:00")),
"datetime.datetime(2008, 5, 30, 14, 30, tzinfo=<UTC>)"
)
self.assertEqual(f.clean(""), None)
# @@@ old doctests that have not been finished (largely due to needing to
# better understand how these bits were created and use-cases)
NOT_USED = {"API_TESTS": r"""
>>> class Foo(object):
... datetime = datetime(2008, 6, 20, 23, 58, 17)
... @decorators.localdatetime('datetime')
... def localdatetime(self):
... return 'Australia/Lindeman'
...
>>> foo = Foo()
>>> foo.datetime
datetime.datetime(2008, 6, 20, 23, 58, 17)
>>> foo.localdatetime
datetime.datetime(2008, 6, 21, 9, 58, 17, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
>>> foo.localdatetime = datetime(2008, 6, 12, 23, 50, 0)
>>> foo.datetime
datetime.datetime(2008, 6, 12, 13, 50, tzinfo=<UTC>)
>>> foo.localdatetime
datetime.datetime(2008, 6, 12, 23, 50, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
"""} | mit | -5,622,830,680,945,569,000 | 34.316667 | 115 | 0.603367 | false |
bgribble/mfp | mfp/test/test-dsp.py | 1 | 1777 |
from unittest import TestCase
from mfp.mfp_app import MFPApp
from mfp.patch import Patch
from mfp.scope import NaiveScope
def setup():
MFPApp().setup()
def mkproc(case, init_type, init_args=None):
return MFPApp().create(init_type, init_args, case.patch, None, init_type)
class DSPObjectTests (TestCase):
def setUp(self):
self.patch = Patch('default', '', None, NaiveScope(), 'default')
def tearDown(self):
import time
time.sleep(0.500)
def test_create(self):
'''test_create: [dsp] can make a DSP object'''
o = mkproc(self, "osc~", "500")
def test_read(self):
'''test_read: [dsp] can read back a creation parameter'''
o = mkproc(self, "osc~", "500")
print("test_read: objid = ", o, o.dsp_obj)
f = o.dsp_obj.getparam("_sig_1")
print(f)
assert f == 500
def test_connect_disconnect(self):
'''test_connect_disconnect: [dsp] make/break connections'''
print("============= Creating in~")
inp = mkproc(self, "in~", "0")
print("============= Creating out~")
outp = mkproc(self, "out~", "0")
print("============= Created objects")
inp.connect(0, outp, 0)
print("============= Called connect")
inp.disconnect(0, outp, 0)
print("============== disconnected")
def test_delete(self):
        '''test_delete: [dsp] destroy dsp object'''
print("Creating")
inp = mkproc(self, "in~", "0")
outp = mkproc(self, "out~", "0")
print("connecting")
inp.connect(0, outp, 0)
print("deleting")
outp.delete()
inp.delete()
print("done")
def teardown():
MFPApp().finish()
print("test-dsp.py: MFPApp finish done")
| gpl-2.0 | 1,709,242,076,048,338,700 | 27.206349 | 77 | 0.546427 | false |
project-icp/bee-pollinator-app | src/icp/icp/celery.py | 1 | 3824 | from __future__ import absolute_import
import os
import rollbar
import logging
from celery import Celery
from celery._state import connect_on_app_finalize
from celery.signals import task_failure
from django.conf import settings
@connect_on_app_finalize
def add_unlock_chord_task_shim(app):
"""
Override native unlock_chord to support configurable max_retries.
Original code taken from https://goo.gl/3mX0ie
This task is used by result backends without native chord support.
It joins chords by creating a task chain polling the header for completion.
"""
from celery.canvas import maybe_signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple
logger = logging.getLogger(__name__)
MAX_RETRIES = settings.CELERY_CHORD_UNLOCK_MAX_RETRIES
@app.task(name='celery.chord_unlock', shared=False, default_retry_delay=1,
ignore_result=True, lazy=False, bind=True,
max_retries=MAX_RETRIES)
def unlock_chord(self, group_id, callback, interval=None,
max_retries=MAX_RETRIES, result=None,
Result=app.AsyncResult, GroupResult=app.GroupResult,
result_from_tuple=result_from_tuple, **kwargs):
if interval is None:
interval = self.default_retry_delay
# check if the task group is ready, and if so apply the callback.
callback = maybe_signature(callback, app)
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
app=app,
)
j = deps.join_native if deps.supports_native_join else deps.join
try:
ready = deps.ready()
except Exception as exc:
raise self.retry(
exc=exc, countdown=interval, max_retries=max_retries)
else:
if not ready:
raise self.retry(countdown=interval, max_retries=max_retries)
callback = maybe_signature(callback, app=app)
try:
with allow_join_result():
ret = j(timeout=3.0, propagate=True)
except Exception as exc:
try:
culprit = next(deps._failed_join_report())
reason = 'Dependency {0.id} raised {1!r}'.format(
culprit, exc,
)
except StopIteration:
reason = repr(exc)
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(callback,
ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc:
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(
callback,
exc=ChordError('Callback error: {0!r}'.format(exc)),
)
return unlock_chord
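# Minimal sketch of what exercises the shim above (task names are
# hypothetical): on a result backend without native chord support, a chord
# polls its header via celery.chord_unlock, now capped by
# settings.CELERY_CHORD_UNLOCK_MAX_RETRIES.
#
#   from celery import chord
#   chord([add.s(2, 2), add.s(4, 4)])(summarize.s())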
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'icp.settings.production')
app = Celery('icp')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
rollbar_settings = getattr(settings, 'ROLLBAR', {})
if rollbar_settings:
rollbar.init(rollbar_settings.get('access_token'),
rollbar_settings.get('environment'))
@task_failure.connect
def handle_task_failure(**kw):
if rollbar_settings:
rollbar.report_exc_info(extra_data=kw)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| apache-2.0 | 5,018,780,426,498,828,000 | 33.45045 | 79 | 0.613494 | false |
h4ng3r/radare2 | sys/meson.py | 1 | 10237 | """Meson build for radare2"""
import argparse
import glob
import logging
import os
import re
import shutil
import subprocess
import sys
BUILDDIR = 'build'
BACKENDS = ['ninja', 'vs2015', 'vs2017']
PATH_FMT = {}
MESON = None
ROOT = None
log = None
def set_global_variables():
"""[R_API] Set global variables"""
global log
global ROOT
global MESON
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
logging.basicConfig(format='[%(name)s][%(levelname)s]: %(message)s',
level=logging.DEBUG)
log = logging.getLogger('r2-meson')
with open(os.path.join(ROOT, 'configure.acr')) as f:
f.readline()
version = f.readline().split()[1].rstrip()
if os.name == 'nt':
meson = os.path.join(os.path.dirname(sys.executable), 'Scripts', 'meson.py')
MESON = [sys.executable, meson]
else:
MESON = ['meson']
PATH_FMT['ROOT'] = ROOT
PATH_FMT['R2_VERSION'] = version
log.debug('Root: %s', ROOT)
log.debug('Meson: %s', MESON)
log.debug('Version: %s', version)
def meson(root, build, prefix=None, backend=None,
release=False, shared=False, *, options=[]):
"""[R_API] Invoke meson"""
command = MESON + [root, build]
if prefix:
command.append('--prefix={}'.format(prefix))
if backend:
command.append('--backend={}'.format(backend))
if release:
command.append('--buildtype=release')
if shared:
command.append('--default-library=shared')
else:
command.append('--default-library=static')
if options:
command.extend(options)
log.debug('Invoking meson: %s', command)
ret = subprocess.call(command)
if ret != 0:
log.error('Meson error. Exiting.')
sys.exit(1)
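# Example of the command assembled by meson() above (paths are illustrative):
#   meson /path/to/radare2 /path/to/radare2/build --prefix=/usr/local \
#       --backend=ninja --buildtype=release --default-library=static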
def ninja(folder, *targets):
"""[R_API] Invoke ninja"""
command = ['ninja', '-C', folder]
if targets:
command.extend(targets)
log.debug('Invoking ninja: %s', command)
ret = subprocess.call(command)
if ret != 0:
log.error('Ninja error. Exiting.')
sys.exit(1)
def msbuild(project, *params):
"""[R_API] Invoke MSbuild"""
command = ['msbuild', project]
if params:
command.extend(params)
log.info('Invoking MSbuild: %s', command)
ret = subprocess.call(command)
if ret != 0:
log.error('MSbuild error. Exiting.')
sys.exit(1)
def copytree(src, dst, exclude=()):
src = src.format(**PATH_FMT)
dst = dst.format(**PATH_FMT)
log.debug('copytree "%s" -> "%s"', src, dst)
shutil.copytree(src, dst, ignore=shutil.ignore_patterns(*exclude) if exclude else None)
def move(src, dst):
src = src.format(**PATH_FMT)
dst = dst.format(**PATH_FMT)
term = os.path.sep if os.path.isdir(dst) else ''
log.debug('move "%s" -> "%s%s"', src, dst, term)
for file in glob.iglob(src):
shutil.move(file, dst)
def copy(src, dst):
src = src.format(**PATH_FMT)
dst = dst.format(**PATH_FMT)
term = os.path.sep if os.path.isdir(dst) else ''
log.debug('copy "%s" -> "%s%s"', src, dst, term)
for file in glob.iglob(src, recursive='**' in src):
shutil.copy2(file, dst)
def makedirs(path):
path = path.format(**PATH_FMT)
log.debug('makedirs "%s"', path)
os.makedirs(path)
def xp_compat(builddir):
log.info('Running XP compat script')
with open(os.path.join(builddir, 'REGEN.vcxproj'), 'r') as f:
version = re.search('<PlatformToolset>(.*)</PlatformToolset>', f.read()).group(1)
if version.endswith('_xp'):
log.info('Skipping %s', builddir)
return
log.debug('Translating from %s to %s_xp', version, version)
newversion = version+'_xp'
for f in glob.iglob(os.path.join(builddir, '**', '*.vcxproj'), recursive=True):
with open(f, 'r') as proj:
c = proj.read()
c = c.replace(version, newversion)
with open(f, 'w') as proj:
proj.write(c)
log.debug("%s .. OK", f)
def vs_dedup(builddir):
"""Remove duplicated dependency entries from vs project"""
start = '<AdditionalDependencies>'
end = ';%(AdditionalDependencies)'
for f in glob.iglob(os.path.join(builddir, '**', '*.vcxproj'), recursive=True):
with open(f) as proj:
data = proj.read()
idx = data.find(start)
if idx < 0:
continue
idx += len(start)
idx2 = data.find(end, idx)
if idx2 < 0:
continue
libs = set(data[idx:idx2].split(';'))
with open(f, 'w') as proj:
proj.write(data[:idx])
proj.write(';'.join(sorted(libs)))
proj.write(data[idx2:])
log.debug('%s processed', f)
def win_dist(args):
"""Create r2 distribution for Windows"""
builddir = os.path.join(ROOT, args.dir)
PATH_FMT['DIST'] = args.install
PATH_FMT['BUILDDIR'] = builddir
makedirs(r'{DIST}')
copy(r'{BUILDDIR}\binr\*\*.exe', r'{DIST}')
copy(r'{BUILDDIR}\libr\*\*.dll', r'{DIST}')
makedirs(r'{DIST}\lib')
if args.shared:
copy(r'{BUILDDIR}\libr\*\*.lib', r'{DIST}\lib')
else:
copy(r'{BUILDDIR}\libr\*\*.a', r'{DIST}\lib')
copy(r'{BUILDDIR}\shlr\libr_shlr.a', r'{DIST}\lib')
win_dist_libr2()
def win_dist_libr2(**path_fmt):
"""[R_API] Add libr2 data/www/include/doc to dist directory"""
PATH_FMT.update(path_fmt)
copytree(r'{ROOT}\shlr\www', r'{DIST}\www')
copytree(r'{ROOT}\libr\magic\d\default', r'{DIST}\share\radare2\{R2_VERSION}\magic')
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\syscall')
copy(r'{BUILDDIR}\libr\syscall\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\syscall')
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\fcnsign')
copy(r'{BUILDDIR}\libr\anal\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\fcnsign')
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\opcodes')
copy(r'{BUILDDIR}\libr\asm\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\opcodes')
makedirs(r'{DIST}\include\libr\sdb')
makedirs(r'{DIST}\include\libr\r_util')
copy(r'{ROOT}\libr\include\*.h', r'{DIST}\include\libr')
copy(r'{BUILDDIR}\r_version.h', r'{DIST}\include\libr')
copy(r'{BUILDDIR}\r_userconf.h', r'{DIST}\include\libr')
copy(r'{ROOT}\libr\include\sdb\*.h', r'{DIST}\include\libr\sdb')
copy(r'{ROOT}\libr\include\r_util\*.h', r'{DIST}\include\libr\r_util')
makedirs(r'{DIST}\share\doc\radare2')
copy(r'{ROOT}\doc\fortunes.*', r'{DIST}\share\doc\radare2')
copytree(r'{ROOT}\libr\bin\d', r'{DIST}\share\radare2\{R2_VERSION}\format',
exclude=('Makefile', 'meson.build', 'dll'))
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\format\dll')
copy(r'{BUILDDIR}\libr\bin\d\*.sdb', r'{DIST}\share\radare2\{R2_VERSION}\format\dll')
copytree(r'{ROOT}\libr\cons\d', r'{DIST}\share\radare2\{R2_VERSION}\cons',
exclude=('Makefile', 'meson.build'))
makedirs(r'{DIST}\share\radare2\{R2_VERSION}\hud')
copy(r'{ROOT}\doc\hud', r'{DIST}\share\radare2\{R2_VERSION}\hud\main')
def build(args):
""" Build radare2 """
log.info('Building radare2')
r2_builddir = os.path.join(ROOT, args.dir)
options = ['-D%s' % x for x in args.options]
if not os.path.exists(r2_builddir):
meson(ROOT, r2_builddir, prefix=args.prefix, backend=args.backend,
release=args.release, shared=args.shared, options=options)
if args.backend != 'ninja':
vs_dedup(r2_builddir)
if args.xp:
xp_compat(r2_builddir)
if not args.project:
project = os.path.join(r2_builddir, 'radare2.sln')
msbuild(project, '/m')
else:
ninja(r2_builddir)
def install(args):
""" Install radare2 """
if os.name == 'nt':
win_dist(args)
return
log.warning('Install not implemented yet for this platform.')
# TODO
#if os.name == 'posix':
# os.system('DESTDIR="{destdir}" ninja -C {build} install'
# .format(destdir=destdir, build=args.dir))
def main():
# Create logger and get applications paths
set_global_variables()
# Create parser
parser = argparse.ArgumentParser(description='Mesonbuild scripts for radare2')
parser.add_argument('--project', action='store_true',
help='Create a visual studio project and do not build.')
parser.add_argument('--release', action='store_true',
help='Set the build as Release (remove debug info)')
parser.add_argument('--backend', choices=BACKENDS, default='ninja',
help='Choose build backend (default: %(default)s)')
parser.add_argument('--shared', action='store_true',
help='Link dynamically (shared library) rather than statically')
parser.add_argument('--prefix', default=None,
help='Set project installation prefix')
parser.add_argument('--dir', default=BUILDDIR, required=False,
help='Destination build directory (default: %(default)s)')
parser.add_argument('--xp', action='store_true',
help='Adds support for Windows XP')
if os.name == 'nt':
parser.add_argument('--install', help='Installation directory')
else:
parser.add_argument('--install', action='store_true',
help='Install radare2 after building')
parser.add_argument('--options', nargs='*', default=[])
args = parser.parse_args()
# Check arguments
if args.project and args.backend == 'ninja':
log.error('--project is not compatible with --backend ninja')
sys.exit(1)
if args.xp and args.backend == 'ninja':
log.error('--xp is not compatible with --backend ninja')
sys.exit(1)
if os.name == 'nt' and args.install and os.path.exists(args.install):
log.error('%s already exists', args.install)
sys.exit(1)
if os.name == 'nt' and not args.prefix:
args.prefix = os.path.join(ROOT, args.dir, 'priv_install_dir')
for o in args.options:
        if '=' not in o:
log.error('Invalid option: %s', o)
sys.exit(1)
# Build it!
log.debug('Arguments: %s', args)
build(args)
if args.install:
install(args)
if __name__ == '__main__':
main()
| lgpl-3.0 | -6,741,204,642,511,179,000 | 34.058219 | 91 | 0.601348 | false |
IdanMann/SnapshotGenerator | snapgen.py | 1 | 5427 | from PIL import Image
from resources import common
import settings
class SnapshotGenerator:
def __init__(self, base_available_snapshot_image, skeleton, bid_image=None,
base_unavailable_snapshot_image=None):
# Initialize objects
self.elements_skeleton = BaseElementsSkeleton(skeleton=skeleton)
self.image_template = BaseImageTemplate(base_available_snapshot_image=base_available_snapshot_image,
base_unavailable_snapshot_image=base_unavailable_snapshot_image)
self.bid_image_template = BaseBidImageTemplate(bid_image=bid_image)
# Validate integrity
self.image_template.verify()
self.elements_skeleton.verify(self.image_template.get_available_image_size(),
self.image_template.get_unavailable_image_size())
self.bid_image_template.verify(self.image_template.get_available_image_size()[0],
self.image_template.get_available_image_size()[1])
def add_bid(self, bid_data):
# Extend base_available_snapshot with a slot
raise NotImplementedError
def set_title(self):
raise NotImplementedError
class BaseImageTemplate:
# Image Template, receives the images used to generate the snapshot and an ElementsSkeleton object
def __init__(self, base_available_snapshot_image, base_unavailable_snapshot_image=None):
try:
self.base_available_snapshot_image = Image.open(base_available_snapshot_image).convert('RGBA')
self.base_unavailable_snapshot_image = Image.open(base_unavailable_snapshot_image)\
if base_unavailable_snapshot_image else self.base_available_snapshot_image
except Exception as e:
# Failed to open base image files
raise Exception(e)
        # PIL's Image.size is a (width, height) property, not a method.
        self.base_available_max_x, self.base_available_max_y = self.base_available_snapshot_image.size
        self.base_unavailable_max_x, self.base_unavailable_max_y = self.base_unavailable_snapshot_image.size
def verify(self):
# Ensure images past are of valid dimensions
# check that both templates are of consistent dimensions
assert self.base_available_max_x == self.base_unavailable_max_x, \
"X dimensions for the base images are not equal"
assert self.base_available_max_y == self.base_unavailable_max_y, \
"Y dimensions for the base images are not equal"
    def get_available_image_size(self):
        return self.base_available_snapshot_image.size
    def get_unavailable_image_size(self):
        return self.base_unavailable_snapshot_image.size
def _extend_edge(self):
# This method can be used to extend the base image size to allow big elements to fit in
raise NotImplementedError
class BaseElementsSkeleton:
# Basic snapshot elements meta data
def __init__(self, skeleton):
self.meta_data = skeleton.get(common.META_DATA)
self.field_mapping = skeleton.get(common.MAPPING)
assert isinstance(self.meta_data, dict),\
"Could not load meta data using the key: {meta_data}".format(meta_data=common.META_DATA)
assert isinstance(self.field_mapping, dict),\
"Could not load mapping using the key: {mapping}".format(mapping=common.MAPPING)
# Title
title_key = self.field_mapping.get("title")
title_font = self.meta_data.get("title_font", settings.DEFAULT_FONT)
title_color = common.create_rgba_color_tuple(self.meta_data.get("title_color", settings.DEFAULT_COLOR_STRING))
self.title_x = self.meta_data.get("title_x_position", 0)
self.title_y = self.meta_data.get("title_y_position", 0)
# Bid
self.first_bid_x, self.first_bid_y = (0, 0)
def verify(self, base_available_xy=(0, 0), base_unavailable_xy=(0, 0)):
# check that title is not out of bounds
assert self.title_x >= 0, "Title's X dimension must be 0 or higher"
assert self.title_y >= 0, "Title's Y dimension must be 0 or higher"
assert self.title_x <= base_available_xy[0] and self.title_x <= base_unavailable_xy[0],\
"Title's X position is out of the image boundaries"
assert self.title_y <= base_available_xy[1] and self.title_y <= base_unavailable_xy[1],\
"Title's Y position is out of the image boundaries"
# check that the first bid is not out of bounds
assert self.first_bid_x >= 0, "First bid's X dimension must be 0 or higher"
assert self.first_bid_y >= 0, "First bid's Y dimension must be 0 or higher"
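    # Rough sketch of the skeleton dict this class expects. The keys come from
    # the common.META_DATA and common.MAPPING constants; the literal field
    # values below are assumptions for illustration only:
    #
    #   skeleton = {
    #       common.META_DATA: {
    #           'title_font': 'arial.ttf',
    #           'title_color': '255,255,255,255',
    #           'title_x_position': 10,
    #           'title_y_position': 20,
    #       },
    #       common.MAPPING: {'title': 'auction_title'},
    #   }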
class BaseBidImageTemplate:
# Base bid object with all parameters to create a bid
def __init__(self, bid_image):
assert bid_image, "Could not find a bid image to use"
try:
self.bid_image = Image.open(bid_image)
except Exception as e:
raise Exception(e)
        self.bid_max_x, self.bid_max_y = self.bid_image.size
def verify(self, base_available_max_x, base_available_max_y):
# check that the first bid is not out of bounds
assert self.bid_max_x <= base_available_max_x, \
"X dimensions for the bid image are bigger than the base image"
assert self.bid_max_y <= base_available_max_y, \
"Y dimensions for the bid image are bigger than the base image"
| mit | -1,809,330,954,875,185,000 | 46.191304 | 118 | 0.657638 | false |
distributed-system-analysis/pbench | lib/pbench/server/api/resources/query_apis/controllers_list.py | 1 | 6159 | from flask import jsonify
from logging import Logger
from typing import Any, AnyStr, Dict
from pbench.server import PbenchServerConfig
from pbench.server.api.resources.query_apis import (
ElasticBase,
Schema,
Parameter,
ParamType,
PostprocessError,
)
class ControllersList(ElasticBase):
"""
Get the names of controllers within a date range.
"""
def __init__(self, config: PbenchServerConfig, logger: Logger):
super().__init__(
config,
logger,
Schema(
Parameter("user", ParamType.USER, required=False),
Parameter("start", ParamType.DATE, required=True),
Parameter("end", ParamType.DATE, required=True),
),
)
def assemble(self, json_data: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
"""
Construct a search for Pbench controller names which have registered
datasets within a specified date range and which are either owned
by a specified username, or have been made publicly accessible.
{
"user": "username",
"start": "start-time",
"end": "end-time"
}
json_data: JSON dictionary of type-normalized parameters
user: specifies the owner of the data to be searched; it need not
necessarily be the user represented by the session token
header, assuming the session user is authorized to view "user"s
data. If "user": None is specified, then only public datasets
will be returned.
TODO: When we have authorization infrastructure, we'll need to
check that "session user" has rights to view "user" data. We might
also default a missing "user" JSON field with the authorization
token's user. This would require a different mechanism to signal
"return public data"; for example, we could specify either
"access": "public", "access": "private", or "access": "all" to
include both private and public data.
"start" and "end" are datetime objects representing a set of Elasticsearch
run document indices in which to search.
"""
user = json_data.get("user")
start = json_data.get("start")
end = json_data.get("end")
# We need to pass string dates as part of the Elasticsearch query; we
# use the unconverted strings passed by the caller rather than the
# adjusted and normalized datetime objects for this.
start_arg = f"{start:%Y-%m}"
end_arg = f"{end:%Y-%m}"
self.logger.info(
"Discover controllers for user {}, prefix {}: ({} - {})",
user,
self.prefix,
start,
end,
)
uri_fragment = self._gen_month_range("run", start, end)
return {
"path": f"/{uri_fragment}/_search",
"kwargs": {
"json": {
"query": {
"bool": {
"filter": [
{"term": self._get_user_term(user)},
{
"range": {
"@timestamp": {"gte": start_arg, "lte": end_arg}
}
},
]
}
},
"size": 0, # Don't return "hits", only aggregations
"aggs": {
"controllers": {
"terms": {
"field": "run.controller",
"order": [{"runs": "desc"}],
},
"aggs": {"runs": {"max": {"field": "run.start"}}},
}
},
},
"params": {"ignore_unavailable": "true"},
},
}
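    # Sketch of the aggregation response shape that postprocess() below
    # consumes (controller name, count and timestamps are hypothetical):
    #
    #   {
    #       "hits": {"total": {"value": 2}},
    #       "aggregations": {
    #           "controllers": {
    #               "buckets": [
    #                   {"key": "alphaville.example.com",
    #                    "doc_count": 2,
    #                    "runs": {
    #                        "value": 1598473155810.0,
    #                        "value_as_string": "2020-08-26T20:19:15.810Z"}}
    #               ]
    #           }
    #       }
    #   }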
def postprocess(self, es_json: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
"""
Returns a summary of the returned Elasticsearch query results, showing
the Pbench controller name, the number of runs using that controller
name, and the start timestamp of the latest run both in binary and
string form:
[
{
"key": "alphaville.example.com",
"controller": "alphaville.example.com",
"results": 2,
"last_modified_value": 1598473155810.0,
"last_modified_string": "2020-08-26T20:19:15.810Z"
}
]
"""
controllers = []
# If there are no matches for the user, controller name,
# and time range, return the empty list rather than failing.
# Note that we can't check the length of ["hits"]["hits"]
# because we've told Elasticsearch to return only aggregations,
# not source documents.
try:
count = es_json["hits"]["total"]["value"]
if int(count) == 0:
self.logger.warning("No data returned by Elasticsearch")
return jsonify(controllers)
except KeyError as e:
raise PostprocessError(
f"Can't find Elasticsearch match data {e} in {es_json!r}"
)
except ValueError as e:
raise PostprocessError(f"Elasticsearch hit count {count!r} value: {e}")
buckets = es_json["aggregations"]["controllers"]["buckets"]
self.logger.info("{} controllers found", len(buckets))
for controller in buckets:
c = {}
c["key"] = controller["key"]
c["controller"] = controller["key"]
c["results"] = controller["doc_count"]
c["last_modified_value"] = controller["runs"]["value"]
c["last_modified_string"] = controller["runs"]["value_as_string"]
controllers.append(c)
# construct response object
return jsonify(controllers)
| gpl-3.0 | -8,051,723,155,547,731,000 | 38.480769 | 88 | 0.508686 | false |
dg321123/cache | response_filter.py | 1 | 1814 | import json
# This assumes that only list responses are split across pages. I don't like it, but
# it gets me started quickly, punting the question about handling response formats to
# the future.
def coalesce_response(response, n):
collection = []
for page in response:
list_response = json.loads(page)
if isinstance(list_response, list):
collection += list_response
else:
collection = list_response
return collection
# Method to return the top 'n' responses
def top_response_filter(response, n):
collection = coalesce_response(response, n)
return collection[:n]
# Method to return the bottom 'n' responses
def bottom_response_filter(response, n):
collection = coalesce_response(response, n)
return collection[-1 * n:]
# This method can be extended to incorporate other filter types, say average or sum of top n elements.
def response_filter(response, filter_type, count):
if filter_type == 'top':
filter_method = top_response_filter
elif filter_type == 'bottom':
filter_method = bottom_response_filter
else:
filter_method = coalesce_response
return filter_method(response, count)
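# Illustrative usage of response_filter() (page payloads are hypothetical):
#
#   pages = ['[1, 2, 3]', '[4, 5]']       # cached JSON pages
#   response_filter(pages, 'top', 3)      # -> [1, 2, 3]
#   response_filter(pages, 'bottom', 2)   # -> [4, 5]
#   response_filter(pages, '', 0)         # -> [1, 2, 3, 4, 5]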
# Split the path into 3 parts -
# 1. key = key into the cache
# 2. filter_type = kind of filter to apply on the response from the cache
# 3. count = limit the number of response elements
# In the future, you can add other filters such as mean, median, etc.
def path_to_parts(path):
parts = path.split('/')
key = ''
filter_type = ''
count = 0
for part in parts:
if part == 'top' or part == 'bottom':
filter_type = part
elif part.isdigit():
count = int(part)
else:
key += '/' + part
return [key, filter_type, count] | gpl-2.0 | -4,825,386,514,783,277,000 | 28.754098 | 102 | 0.651599 | false |
jmakov/ggrc-core | test/integration/ggrc/converters/test_import_comprehensive.py | 1 | 8254 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from ggrc.models import AccessGroup
from ggrc.models import Program
from ggrc.converters import errors
from ggrc_basic_permissions import Role
from ggrc_basic_permissions import UserRole
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestComprehensiveSheets(TestCase):
"""
test sheet from:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=0
"""
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def tearDown(self):
pass
def test_comprehensive_sheet1_with_custom_attributes(self):
self.create_custom_attributes()
self.create_people()
filename = "comprehensive_sheet1.csv"
response = self.import_file(filename)
indexed = {r["name"]: r for r in response}
expected = {
"Control": {
"created": 14,
"ignored": 2,
"row_errors": 2,
"row_warnings": 3,
"rows": 16,
},
"Objective": {
"created": 8,
"ignored": 7,
"row_errors": 5,
"row_warnings": 4,
"rows": 15,
},
"Program": {
"created": 13,
"ignored": 3,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Issue": {
"created": 10,
"ignored": 4,
"row_errors": 4,
"row_warnings": 4,
"rows": 14,
},
"Policy": {
"created": 13,
"ignored": 3,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Regulation": {
"created": 13,
"ignored": 2,
"row_errors": 3,
"row_warnings": 3,
"rows": 15,
},
"Standard": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 5,
"rows": 16,
},
"Contract": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"System": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Clause": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Process": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Data Asset": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Product": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Project": {
"created": 8,
"ignored": 0,
"row_errors": 0,
"row_warnings": 0,
"rows": 8,
},
"Facility": {
"created": 14,
"ignored": 2,
"row_errors": 3,
"row_warnings": 4,
"rows": 16,
},
"Market": {
"created": 13,
"ignored": 2,
"row_errors": 3,
"row_warnings": 3,
"rows": 15,
},
"Org Group": {
"created": 13,
"ignored": 2,
"row_errors": 3,
"row_warnings": 3,
"rows": 15,
},
"Vendor": {
"created": 13,
"ignored": 2,
"row_errors": 3,
"row_warnings": 3,
"rows": 15,
},
"Person": {
"created": 9,
"ignored": 1,
"row_errors": 1,
"row_warnings": 0,
"rows": 10,
}
}
# general numbers check
for name, data in expected.items():
current = indexed[name]
self.assertEqual(current["rows"], data["rows"], name)
self.assertEqual(current["ignored"], data["ignored"], name)
self.assertEqual(current["created"], data["created"], name)
self.assertEqual(len(current["row_errors"]), data["row_errors"], name)
self.assertEqual(
len(current["row_warnings"]), data["row_warnings"], name)
prog = Program.query.filter_by(slug="prog-8").first()
self.assertEqual(prog.title, "program 8")
self.assertEqual(prog.status, "Draft")
self.assertEqual(prog.description, "test")
custom_vals = [v.attribute_value for v in prog.custom_attribute_values]
expected_custom_vals = ['0', 'a', '2015-12-12 00:00:00', 'test1']
self.assertEqual(set(custom_vals), set(expected_custom_vals))
def test_full_good_import_no_warnings(self):
filename = "full_good_import_no_warnings.csv"
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
response = self.import_file(filename, dry_run=True)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
response = self.import_file(filename)
for message in messages: # response[0] = Person block
self.assertEqual(set(response[0][message]), set())
ggrc_admin = db.session.query(Role.id).filter(Role.name == "gGRC Admin")
reader = db.session.query(Role.id).filter(Role.name == "Reader")
creator = db.session.query(Role.id).filter(Role.name == "Creator")
ggrc_admins = UserRole.query.filter(UserRole.role_id == ggrc_admin).all()
readers = UserRole.query.filter(UserRole.role_id == reader).all()
creators = UserRole.query.filter(UserRole.role_id == creator).all()
access_groups = db.session.query(AccessGroup).all()
self.assertEqual(len(ggrc_admins), 12)
self.assertEqual(len(readers), 5)
self.assertEqual(len(creators), 6)
self.assertEqual(len(access_groups), 10)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
def test_errors_and_warnings(self):
"""
This test should test for all possible warnings and errors but it is still
incomplete.
"""
filename = "import_with_all_warnings_and_errors.csv"
dry_response = self.import_file(filename, dry_run=True)
response = self.import_file(filename)
block_messages = [
{ # warnings and error of the first imported block
"block_errors": set([
errors.DUPLICATE_COLUMN.format(
line=1, duplicates="Notes, Test Plan"),
])
}
]
self.assertEqual(dry_response, response)
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
for message_block, response_block in zip(block_messages, response):
for message in messages:
self.assertEqual(
message_block.get(message, set()),
set(response_block[message])
)
def create_custom_attributes(self):
gen = self.generator.generate_custom_attribute
gen("control", title="my custom text", mandatory=True)
gen("program", title="my_text", mandatory=True)
gen("program", title="my_date", attribute_type="Date")
gen("program", title="my_checkbox", attribute_type="Checkbox")
gen("program", title="my_dropdown", attribute_type="Dropdown",
options="a,b,c,d")
# gen("program", title="my_description", attribute_type="Rich Text")
def create_people(self):
emails = [
"[email protected]",
"[email protected]",
"[email protected]",
"[email protected]",
]
for email in emails:
self.generator.generate_person({
"name": email.split("@")[0].title(),
"email": email,
}, "gGRC Admin")
| apache-2.0 | 5,902,148,574,953,953,000 | 29.014545 | 98 | 0.526896 | false |
42cc/apiclient-kava | setup.py | 1 | 1143 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join, dirname
from setuptools import setup, find_packages
def get_version(fname='kavahq/__init__.py'):
with open(fname) as f:
for line in f:
if line.startswith('__version__'):
return eval(line.split('=')[-1])
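# get_version() above scans kavahq/__init__.py for a line such as
# "__version__ = '0.1.0'" (version value here is hypothetical) and returns the
# evaluated value, keeping the package version defined in a single place.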
setup(
name='kavahq-api',
version=get_version(),
packages=find_packages(),
requires=['python (>= 2.7)', ],
install_requires=['requests'],
tests_require=['mock', 'unittest2', 'nose', 'coverage'],
description='wrapper over kavahq.com API',
long_description=open(join(dirname(__file__), 'README.rst')).read(),
author='42 Coffee Cups',
author_email='[email protected]',
url='https://github.com/42cc/apiclient-kava',
download_url='https://github.com/42cc/apiclient-kava/archive/master.zip',
license='GPL v2 License',
keywords=['kavahq', 'api'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
],
)
| gpl-2.0 | 6,429,455,022,025,177,000 | 31.657143 | 77 | 0.616798 | false |
ric2b/Vivaldi-browser | chromium/tools/binary_size/diagnose_bloat.py | 1 | 34013 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for finding the cause of binary size bloat.
See //tools/binary_size/README.md for example usage.
Note: this tool will perform gclient sync/git checkout on your local repo.
"""
from __future__ import print_function
import atexit
import argparse
import collections
from contextlib import contextmanager
import distutils.spawn
import json
import logging
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import zipfile
_COMMIT_COUNT_WARN_THRESHOLD = 15
_ALLOWED_CONSECUTIVE_FAILURES = 2
_SRC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
_DEFAULT_ARCHIVE_DIR = os.path.join(_SRC_ROOT, 'out', 'binary-size-results')
_DEFAULT_OUT_DIR = os.path.join(_SRC_ROOT, 'out', 'binary-size-build')
_BINARY_SIZE_DIR = os.path.join(_SRC_ROOT, 'tools', 'binary_size')
_RESOURCE_SIZES_PATH = os.path.join(
_SRC_ROOT, 'build', 'android', 'resource_sizes.py')
_LLVM_TOOLS_DIR = os.path.join(
_SRC_ROOT, 'third_party', 'llvm-build', 'Release+Asserts', 'bin')
_DOWNLOAD_OBJDUMP_PATH = os.path.join(
_SRC_ROOT, 'tools', 'clang', 'scripts', 'download_objdump.py')
_GN_PATH = os.path.join(_SRC_ROOT, 'third_party', 'depot_tools', 'gn')
_NINJA_PATH = os.path.join(_SRC_ROOT, 'third_party', 'depot_tools', 'ninja')
_DiffResult = collections.namedtuple('DiffResult', ['name', 'value', 'units'])
class BaseDiff(object):
"""Base class capturing binary size diffs."""
def __init__(self, name):
self.name = name
self.banner = '\n' + '*' * 30 + name + '*' * 30
def AppendResults(self, logfiles):
"""Print and write diff results to an open |logfile|."""
full, short = logfiles
_WriteToFile(full, self.banner)
_WriteToFile(short, self.banner)
for s in self.Summary():
_WriteToFile(short, s)
_WriteToFile(short, '')
for s in self.DetailedResults():
full.write(s + '\n')
@property
def summary_stat(self):
"""Returns a tuple of (name, value, units) for the most important metric."""
raise NotImplementedError()
def Summary(self):
"""A short description that summarizes the source of binary size bloat."""
raise NotImplementedError()
def DetailedResults(self):
"""An iterable description of the cause of binary size bloat."""
raise NotImplementedError()
def ProduceDiff(self, before_dir, after_dir):
"""Prepare a binary size diff with ready to print results."""
raise NotImplementedError()
def RunDiff(self, logfiles, before_dir, after_dir):
logging.info('Creating: %s', self.name)
self.ProduceDiff(before_dir, after_dir)
self.AppendResults(logfiles)
class NativeDiff(BaseDiff):
# E.g.: Section Sizes (Total=1.2 kb (1222 bytes)):
_RE_SUMMARY_STAT = re.compile(
r'Section Sizes \(Total=(?P<value>-?[0-9\.]+) ?(?P<units>\w+)')
_SUMMARY_STAT_NAME = 'Native Library Delta'
def __init__(self, size_name, supersize_path):
self._size_name = size_name
self._supersize_path = supersize_path
self._diff = []
super(NativeDiff, self).__init__('Native Diff')
@property
def summary_stat(self):
m = NativeDiff._RE_SUMMARY_STAT.search(self._diff)
if m:
return _DiffResult(
NativeDiff._SUMMARY_STAT_NAME, m.group('value'), m.group('units'))
raise Exception('Could not extract total from:\n' + self._diff)
def DetailedResults(self):
return self._diff.splitlines()
def Summary(self):
return self.DetailedResults()[:100]
def ProduceDiff(self, before_dir, after_dir):
before_size = os.path.join(before_dir, self._size_name)
after_size = os.path.join(after_dir, self._size_name)
cmd = [self._supersize_path, 'diff', before_size, after_size]
self._diff = _RunCmd(cmd)[0].replace('{', '{{').replace('}', '}}')
class ResourceSizesDiff(BaseDiff):
# Ordered by output appearance.
_SUMMARY_SECTIONS = (
'Specifics', 'InstallSize', 'InstallBreakdown', 'Dex')
# Sections where it makes sense to sum subsections into a section total.
_AGGREGATE_SECTIONS = (
'InstallBreakdown', 'Breakdown', 'MainLibInfo', 'Uncompressed')
def __init__(self, apk_name, filename='results-chart.json'):
self._apk_name = apk_name
self._diff = None # Set by |ProduceDiff()|
self._filename = filename
super(ResourceSizesDiff, self).__init__('Resource Sizes Diff')
@property
def summary_stat(self):
for section_name, results in self._diff.iteritems():
for subsection_name, value, units in results:
if 'normalized' in subsection_name:
full_name = '{} {}'.format(section_name, subsection_name)
return _DiffResult(full_name, value, units)
raise Exception('Could not find "normalized" in: ' + repr(self._diff))
def DetailedResults(self):
return self._ResultLines()
def Summary(self):
footer_lines = [
'',
'For an explanation of these metrics, see:',
('https://chromium.googlesource.com/chromium/src/+/master/docs/speed/'
'binary_size/metrics.md#Metrics-for-Android')]
return self._ResultLines(
include_sections=ResourceSizesDiff._SUMMARY_SECTIONS) + footer_lines
def ProduceDiff(self, before_dir, after_dir):
before = self._LoadResults(before_dir)
after = self._LoadResults(after_dir)
self._diff = collections.defaultdict(list)
for section, section_dict in after.iteritems():
for subsection, v in section_dict.iteritems():
# Ignore entries when resource_sizes.py chartjson format has changed.
if (section not in before or
subsection not in before[section] or
v['units'] != before[section][subsection]['units']):
logging.warning(
'Found differing dict structures for resource_sizes.py, '
'skipping %s %s', section, subsection)
else:
self._diff[section].append(_DiffResult(
subsection,
v['value'] - before[section][subsection]['value'],
v['units']))
def _ResultLines(self, include_sections=None):
"""Generates diff lines for the specified sections (defaults to all)."""
section_lines = collections.defaultdict(list)
for section_name, section_results in self._diff.iteritems():
if not include_sections or section_name in include_sections:
subsection_lines = []
section_sum = 0
units = ''
for name, value, units in section_results:
# Omit subsections with no changes for summaries.
if value == 0 and include_sections:
continue
section_sum += value
subsection_lines.append('{:>+14,} {} {}'.format(value, units, name))
section_header = section_name
if section_name in ResourceSizesDiff._AGGREGATE_SECTIONS:
section_header += ' ({:+,} {})'.format(section_sum, units)
section_header += ':'
# Omit sections with empty subsections.
if subsection_lines:
section_lines[section_name].append(section_header)
section_lines[section_name].extend(subsection_lines)
if not section_lines:
return ['Empty ' + self.name]
ret = []
for k in include_sections or sorted(section_lines):
ret.extend(section_lines[k])
return ret
def _LoadResults(self, archive_dir):
chartjson_file = os.path.join(archive_dir, self._filename)
with open(chartjson_file) as f:
chartjson = json.load(f)
charts = chartjson['charts']
# Older versions of resource_sizes.py prefixed the apk onto section names.
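    # (Illustrative: a key like 'MonochromePublic_InstallSize' is reduced to
    # 'InstallSize' by the prefix strip below.)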
ret = {}
for section, section_dict in charts.iteritems():
section_no_target = re.sub(r'^.*_', '', section)
ret[section_no_target] = section_dict
return ret
class _BuildHelper(object):
"""Helper class for generating and building targets."""
def __init__(self, args):
self.clean = args.clean
self.enable_chrome_android_internal = args.enable_chrome_android_internal
self.extra_gn_args_str = args.gn_args
self.apply_patch = args.extra_rev
self.max_jobs = args.max_jobs
self.max_load_average = args.max_load_average
self.output_directory = args.output_directory
self.target = args.target
self.target_os = args.target_os
self.use_goma = args.use_goma
self._SetDefaults()
self.is_bundle = 'minimal' in self.target
@property
def abs_apk_path(self):
return os.path.join(self.output_directory, self.apk_path)
@property
def abs_mapping_path(self):
return os.path.join(self.output_directory, self.mapping_path)
@property
def apk_name(self):
# my_great_apk -> MyGreat.apk
apk_name = ''.join(s.title() for s in self.target.split('_')[:-1]) + '.apk'
if self.is_bundle:
# my_great_minimal_apks -> MyGreatMinimal.apk -> MyGreat.minimal.apks
apk_name = apk_name.replace('Minimal.apk', '.minimal.apks')
return apk_name.replace('Webview', 'WebView')
@property
def apk_path(self):
return os.path.join('apks', self.apk_name)
@property
def mapping_path(self):
if self.is_bundle:
return self.apk_path.replace('.minimal.apks', '.aab') + '.mapping'
else:
return self.apk_path + '.mapping'
@property
def main_lib_path(self):
# TODO(agrieve): Could maybe extract from .apk or GN?
if self.IsLinux():
return 'chrome'
if 'monochrome' in self.target or 'trichrome' in self.target:
ret = 'lib.unstripped/libmonochrome.so'
elif 'webview' in self.target:
ret = 'lib.unstripped/libwebviewchromium.so'
else:
ret = 'lib.unstripped/libchrome.so'
return ret
@property
def abs_main_lib_path(self):
return os.path.join(self.output_directory, self.main_lib_path)
@property
def map_file_path(self):
return self.main_lib_path + '.map.gz'
@property
def size_name(self):
if self.IsLinux():
return os.path.basename(self.main_lib_path) + '.size'
return self.apk_name + '.size'
def _SetDefaults(self):
has_goma_dir = os.path.exists(os.path.join(os.path.expanduser('~'), 'goma'))
self.use_goma = self.use_goma and has_goma_dir
self.max_load_average = (self.max_load_average or
str(multiprocessing.cpu_count()))
has_internal = os.path.exists(
os.path.join(os.path.dirname(_SRC_ROOT), 'src-internal'))
if has_internal:
self.extra_gn_args_str = (
'is_chrome_branded=true ' + self.extra_gn_args_str)
else:
self.extra_gn_args_str = (
          'ffmpeg_branding="Chrome" proprietary_codecs=true ' +
self.extra_gn_args_str)
if self.IsLinux():
self.extra_gn_args_str = (
'is_cfi=false generate_linker_map=true ' + self.extra_gn_args_str)
self.extra_gn_args_str = ' ' + self.extra_gn_args_str.strip()
if not self.max_jobs:
if self.use_goma:
self.max_jobs = '10000'
elif has_internal:
self.max_jobs = '500'
else:
self.max_jobs = '50'
if not self.target:
if self.IsLinux():
self.target = 'chrome'
elif self.enable_chrome_android_internal:
self.target = 'monochrome_minimal_apks'
else:
self.target = 'monochrome_public_minimal_apks'
def _GenGnCmd(self):
gn_args = 'is_official_build=true'
gn_args += ' android_channel="stable"'
# Variables often become unused when experimenting with macros to reduce
# size, so don't fail on warnings.
gn_args += ' treat_warnings_as_errors=false'
# Speed things up a bit by skipping lint & errorprone.
gn_args += ' disable_android_lint=true'
gn_args += ' use_errorprone_java_compiler=false'
gn_args += ' use_goma=%s' % str(self.use_goma).lower()
gn_args += ' target_os="%s"' % self.target_os
if self.IsAndroid():
gn_args += (' enable_chrome_android_internal=%s' %
str(self.enable_chrome_android_internal).lower())
gn_args += self.extra_gn_args_str
return [_GN_PATH, 'gen', self.output_directory, '--args=%s' % gn_args]
def _GenNinjaCmd(self):
cmd = [_NINJA_PATH, '-C', self.output_directory]
cmd += ['-j', self.max_jobs] if self.max_jobs else []
cmd += ['-l', self.max_load_average] if self.max_load_average else []
cmd += [self.target]
return cmd
def Run(self):
"""Run GN gen/ninja build and return the process returncode."""
logging.info('Building %s within %s (this might take a while).',
self.target, os.path.relpath(self.output_directory))
if self.clean:
_RunCmd([_GN_PATH, 'clean', self.output_directory])
retcode = _RunCmd(
self._GenGnCmd(), verbose=True, exit_on_failure=False)[1]
if retcode:
return retcode
return _RunCmd(
self._GenNinjaCmd(), verbose=True, exit_on_failure=False)[1]
def IsAndroid(self):
return self.target_os == 'android'
def IsLinux(self):
return self.target_os == 'linux'
class _BuildArchive(object):
"""Class for managing a directory with build results and build metadata."""
def __init__(self, rev, base_archive_dir, build, subrepo, slow_options,
save_unstripped):
self.build = build
self.dir = os.path.join(base_archive_dir, rev)
metadata_path = os.path.join(self.dir, 'metadata.txt')
self.rev = rev
self.metadata = _Metadata([self], build, metadata_path, subrepo)
self._slow_options = slow_options
self._save_unstripped = save_unstripped
def ArchiveBuildResults(self, supersize_path, tool_prefix=None):
"""Save build artifacts necessary for diffing."""
logging.info('Saving build results to: %s', self.dir)
_EnsureDirsExist(self.dir)
if self.build.IsAndroid():
self._ArchiveFile(self.build.abs_apk_path)
self._ArchiveFile(self.build.abs_mapping_path)
self._ArchiveResourceSizes()
self._ArchiveSizeFile(supersize_path, tool_prefix)
if self._save_unstripped:
self._ArchiveFile(self.build.abs_main_lib_path)
self.metadata.Write()
assert self.Exists()
def Exists(self):
ret = self.metadata.Exists() and os.path.exists(self.archived_size_path)
if self._save_unstripped:
ret = ret and os.path.exists(self.archived_unstripped_path)
return ret
@property
def archived_unstripped_path(self):
return os.path.join(self.dir, os.path.basename(self.build.main_lib_path))
@property
def archived_size_path(self):
return os.path.join(self.dir, self.build.size_name)
def _ArchiveResourceSizes(self):
cmd = [
_RESOURCE_SIZES_PATH, self.build.abs_apk_path, '--output-dir', self.dir,
'--chartjson', '--chromium-output-dir', self.build.output_directory
]
if self._slow_options:
cmd += ['--estimate-patch-size', '--dump-static-initializers']
_RunCmd(cmd)
def _ArchiveFile(self, filename):
if not os.path.exists(filename):
_Die('missing expected file: %s', filename)
shutil.copy(filename, self.dir)
def _ArchiveSizeFile(self, supersize_path, tool_prefix):
existing_size_file = self.build.abs_apk_path + '.size'
if os.path.exists(existing_size_file):
logging.info('Found existing .size file')
shutil.copy(existing_size_file, self.archived_size_path)
else:
supersize_cmd = [
supersize_path, 'archive', self.archived_size_path, '--elf-file',
self.build.abs_main_lib_path, '--output-directory',
self.build.output_directory
]
if tool_prefix:
supersize_cmd += ['--tool-prefix', tool_prefix]
if self.build.IsAndroid():
supersize_cmd += ['-f', self.build.abs_apk_path]
logging.info('Creating .size file')
_RunCmd(supersize_cmd)
class _DiffArchiveManager(object):
"""Class for maintaining BuildArchives and their related diff artifacts."""
def __init__(self, revs, archive_dir, diffs, build, subrepo, slow_options,
save_unstripped):
self.archive_dir = archive_dir
self.build = build
self.build_archives = [
_BuildArchive(rev, archive_dir, build, subrepo, slow_options,
save_unstripped)
for rev in revs
]
self.diffs = diffs
self.subrepo = subrepo
self._summary_stats = []
def MaybeDiff(self, before_id, after_id):
"""Perform diffs given two build archives."""
before = self.build_archives[before_id]
after = self.build_archives[after_id]
diff_path, short_diff_path = self._DiffFilePaths(before, after)
if not self._CanDiff(before, after):
logging.info(
'Skipping diff for %s due to missing build archives.', diff_path)
return
metadata_path = self._DiffMetadataPath(before, after)
metadata = _Metadata(
[before, after], self.build, metadata_path, self.subrepo)
if metadata.Exists():
logging.info(
'Skipping diff for %s and %s. Matching diff already exists: %s',
before.rev, after.rev, diff_path)
else:
with open(diff_path, 'w') as diff_file, \
open(short_diff_path, 'w') as summary_file:
for d in self.diffs:
d.RunDiff((diff_file, summary_file), before.dir, after.dir)
metadata.Write()
self._AddDiffSummaryStat(before, after)
if os.path.exists(short_diff_path):
_PrintFile(short_diff_path)
logging.info('See detailed diff results here: %s',
os.path.relpath(diff_path))
def GenerateHtmlReport(self, before_id, after_id):
"""Generate HTML report given two build archives."""
before = self.build_archives[before_id]
after = self.build_archives[after_id]
diff_path = self._DiffDir(before, after)
if not self._CanDiff(before, after):
logging.info(
'Skipping HTML report for %s due to missing build archives.',
diff_path)
return
supersize_path = os.path.join(_BINARY_SIZE_DIR, 'supersize')
report_path = os.path.join(diff_path, 'diff.ndjson')
supersize_cmd = [supersize_path, 'html_report', '--diff-with',
before.archived_size_path,
after.archived_size_path,
report_path]
logging.info('Creating HTML report')
_RunCmd(supersize_cmd)
logging.info('View using a local server via: %s start_server %s',
os.path.relpath(supersize_path),
os.path.relpath(report_path))
def Summarize(self):
path = os.path.join(self.archive_dir, 'last_diff_summary.txt')
if self._summary_stats:
with open(path, 'w') as f:
stats = sorted(
self._summary_stats, key=lambda x: x[0].value, reverse=True)
_WriteToFile(f, '\nDiff Summary')
for s, before, after in stats:
_WriteToFile(f, '{:>+10} {} {} for range: {}..{}',
s.value, s.units, s.name, before, after)
# Print cached file if all builds were cached.
num_archives = len(self.build_archives)
if os.path.exists(path) and num_archives > 1:
_PrintFile(path)
if num_archives <= 2:
if not all(a.Exists() for a in self.build_archives):
return
supersize_path = os.path.join(_BINARY_SIZE_DIR, 'supersize')
size2 = ''
if num_archives == 2:
size2 = os.path.relpath(self.build_archives[-1].archived_size_path)
logging.info('Enter supersize console via: %s console %s %s',
os.path.relpath(supersize_path),
os.path.relpath(self.build_archives[0].archived_size_path), size2)
def _AddDiffSummaryStat(self, before, after):
stat = None
if self.build.IsAndroid():
summary_diff_type = ResourceSizesDiff
else:
summary_diff_type = NativeDiff
for d in self.diffs:
if isinstance(d, summary_diff_type):
stat = d.summary_stat
if stat:
self._summary_stats.append((stat, before.rev, after.rev))
def _CanDiff(self, before, after):
return before.Exists() and after.Exists()
def _DiffFilePaths(self, before, after):
ret = os.path.join(self._DiffDir(before, after), 'diff_results')
return ret + '.txt', ret + '.short.txt'
def _DiffMetadataPath(self, before, after):
return os.path.join(self._DiffDir(before, after), 'metadata.txt')
def _DiffDir(self, before, after):
archive_range = '%s..%s' % (before.rev, after.rev)
diff_path = os.path.join(self.archive_dir, 'diffs', archive_range)
_EnsureDirsExist(diff_path)
return diff_path
class _Metadata(object):
def __init__(self, archives, build, path, subrepo):
self.data = {
'revs': [a.rev for a in archives],
'apply_patch': build.apply_patch,
'archive_dirs': [a.dir for a in archives],
'target': build.target,
'target_os': build.target_os,
'subrepo': subrepo,
'path': path,
'gn_args': {
'extra_gn_args_str': build.extra_gn_args_str,
'enable_chrome_android_internal': build.enable_chrome_android_internal,
}
}
def Exists(self):
path = self.data['path']
if os.path.exists(path):
with open(path, 'r') as f:
return self.data == json.load(f)
return False
def Write(self):
with open(self.data['path'], 'w') as f:
json.dump(self.data, f)
def _EnsureDirsExist(path):
if not os.path.exists(path):
os.makedirs(path)
def _RunCmd(cmd, verbose=False, exit_on_failure=True):
"""Convenience function for running commands.
Args:
cmd: the command to run.
verbose: if this is True, then the stdout and stderr of the process will be
printed. If it's false, the stdout will be returned.
exit_on_failure: die if an error occurs when this is True.
Returns:
Tuple of (process stdout, process returncode).
"""
assert not (verbose and exit_on_failure)
cmd_str = ' '.join(c for c in cmd)
logging.debug('Running: %s', cmd_str)
proc_stdout = proc_stderr = subprocess.PIPE
if verbose:
proc_stdout, proc_stderr = sys.stdout, subprocess.STDOUT
proc = subprocess.Popen(cmd, stdout=proc_stdout, stderr=proc_stderr)
stdout, stderr = proc.communicate()
if proc.returncode and exit_on_failure:
_Die('command failed: %s\nstderr:\n%s', cmd_str, stderr)
stdout = stdout.strip() if stdout else ''
return stdout, proc.returncode
def _GitCmd(args, subrepo):
return _RunCmd(['git', '-C', subrepo] + args)[0]
def _GclientSyncCmd(rev, subrepo):
cwd = os.getcwd()
os.chdir(subrepo)
_, retcode = _RunCmd(['gclient', 'sync', '-r', 'src@' + rev],
verbose=True, exit_on_failure=False)
os.chdir(cwd)
return retcode
def _SyncAndBuild(archive, build, subrepo, no_gclient, extra_rev):
"""Sync, build and return non 0 if any commands failed."""
# Simply do a checkout if subrepo is used.
if _CurrentGitHash(subrepo) == archive.rev:
if subrepo != _SRC_ROOT:
logging.info('Skipping git checkout since already at desired rev')
else:
logging.info('Skipping gclient sync since already at desired rev')
elif subrepo != _SRC_ROOT or no_gclient:
_GitCmd(['checkout', archive.rev], subrepo)
else:
# Move to a detached state since gclient sync doesn't work with local
# commits on a branch.
_GitCmd(['checkout', '--detach'], subrepo)
logging.info('Syncing to %s', archive.rev)
ret = _GclientSyncCmd(archive.rev, subrepo)
if ret:
return ret
with _ApplyPatch(extra_rev, subrepo):
return build.Run()
@contextmanager
def _ApplyPatch(rev, subrepo):
if not rev:
yield
else:
restore_func = _GenRestoreFunc(subrepo)
try:
_GitCmd(['cherry-pick', rev, '--strategy-option', 'theirs'], subrepo)
yield
finally:
restore_func()
def _GenerateRevList(rev, reference_rev, all_in_range, subrepo, step):
"""Normalize and optionally generate a list of commits in the given range.
Returns:
A list of revisions ordered from oldest to newest.
"""
rev_seq = '%s^..%s' % (reference_rev, rev)
stdout = _GitCmd(['rev-list', rev_seq], subrepo)
all_revs = stdout.splitlines()[::-1]
if all_in_range or len(all_revs) < 2 or step:
revs = all_revs
if step:
revs = revs[::step]
else:
revs = [all_revs[0], all_revs[-1]]
num_revs = len(revs)
if num_revs >= _COMMIT_COUNT_WARN_THRESHOLD:
_VerifyUserAccepts(
'You\'ve provided a commit range that contains %d commits.' % num_revs)
logging.info('Processing %d commits', num_revs)
return revs
def _ValidateRevs(rev, reference_rev, subrepo, extra_rev):
def git_fatal(args, message):
devnull = open(os.devnull, 'wb')
retcode = subprocess.call(
['git', '-C', subrepo] + args, stdout=devnull, stderr=subprocess.STDOUT)
if retcode:
_Die(message)
no_obj_message = ('%s either doesn\'t exist or your local repo is out of '
'date, try "git fetch origin master"')
git_fatal(['cat-file', '-e', rev], no_obj_message % rev)
git_fatal(['cat-file', '-e', reference_rev], no_obj_message % reference_rev)
if extra_rev:
git_fatal(['cat-file', '-e', extra_rev], no_obj_message % extra_rev)
git_fatal(['merge-base', '--is-ancestor', reference_rev, rev],
'reference-rev is newer than rev')
def _VerifyUserAccepts(message):
print(message + ' Do you want to proceed? [y/n]')
if raw_input('> ').lower() != 'y':
sys.exit()
def _EnsureDirectoryClean(subrepo):
logging.info('Checking source directory')
stdout = _GitCmd(['status', '--porcelain'], subrepo)
# Ignore untracked files.
if stdout and stdout[:2] != '??':
logging.error('Failure: please ensure working directory is clean.')
sys.exit()
def _Die(s, *args):
logging.error('Failure: ' + s, *args)
sys.exit(1)
def _WriteToFile(logfile, s, *args, **kwargs):
if isinstance(s, basestring):
data = s.format(*args, **kwargs) + '\n'
else:
data = '\n'.join(s) + '\n'
logfile.write(data)
def _PrintFile(path):
with open(path) as f:
sys.stdout.write(f.read())
@contextmanager
def _TmpCopyBinarySizeDir():
"""Recursively copy files to a temp dir and yield temp paths."""
# Needs to be at same level of nesting as the real //tools/binary_size
# since supersize uses this to find d3 in //third_party.
tmp_dir = tempfile.mkdtemp(dir=_SRC_ROOT)
try:
bs_dir = os.path.join(tmp_dir, 'binary_size')
shutil.copytree(_BINARY_SIZE_DIR, bs_dir)
# We also copy the tools supersize needs, but only if they exist.
tool_prefix = None
if os.path.exists(_DOWNLOAD_OBJDUMP_PATH):
if not os.path.exists(os.path.join(_LLVM_TOOLS_DIR, 'llvm-readelf')):
_RunCmd([_DOWNLOAD_OBJDUMP_PATH])
tools_dir = os.path.join(bs_dir, 'bintools')
tool_prefix = os.path.join(tools_dir, 'llvm-')
shutil.copytree(_LLVM_TOOLS_DIR, tools_dir)
yield (os.path.join(bs_dir, 'supersize'), tool_prefix)
finally:
shutil.rmtree(tmp_dir)
def _CurrentGitHash(subrepo):
return _GitCmd(['rev-parse', 'HEAD'], subrepo)
def _GenRestoreFunc(subrepo):
branch = _GitCmd(['rev-parse', '--abbrev-ref', 'HEAD'], subrepo)
# Happens when the repo didn't start on a named branch.
if branch == 'HEAD':
branch = _GitCmd(['rev-parse', 'HEAD'], subrepo)
def _RestoreFunc():
logging.warning('Restoring original git checkout')
_GitCmd(['checkout', branch], subrepo)
return _RestoreFunc
def _SetRestoreFunc(subrepo):
atexit.register(_GenRestoreFunc(subrepo))
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('rev',
help='Find binary size bloat for this commit.')
parser.add_argument('--archive-directory',
default=_DEFAULT_ARCHIVE_DIR,
help='Where results are stored.')
parser.add_argument('--reference-rev',
help='Older rev to diff against. If not supplied, '
'the previous commit to rev will be used.')
parser.add_argument('--all',
action='store_true',
help='Build/download all revs from --reference-rev to '
'rev and diff the contiguous revisions.')
parser.add_argument('--include-slow-options',
action='store_true',
help='Run some extra steps that take longer to complete. '
'This includes apk-patch-size estimation and '
'static-initializer counting.')
parser.add_argument('--single',
action='store_true',
help='Sets --reference-rev=rev.')
parser.add_argument('--unstripped',
action='store_true',
help='Save the unstripped native library when archiving.')
parser.add_argument(
'--subrepo',
help='Specify a subrepo directory to use. Implies '
'--no-gclient. All git commands will be executed '
'from the subrepo directory.')
parser.add_argument('--no-gclient',
action='store_true',
help='Do not perform gclient sync steps.')
parser.add_argument('--apply-patch', dest='extra_rev',
help='A local commit to cherry-pick before each build. '
'This can leave your repo in a broken state if '
'the cherry-pick fails.')
parser.add_argument('--step', type=int,
help='Assumes --all and only builds/downloads every '
'--step\'th revision.')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Show commands executed, extra debugging output'
', and Ninja/GN output.')
build_group = parser.add_argument_group('build arguments')
build_group.add_argument('-j',
dest='max_jobs',
help='Run N jobs in parallel.')
build_group.add_argument('-l',
dest='max_load_average',
help='Do not start new jobs if the load average is '
'greater than N.')
build_group.add_argument('--no-goma',
action='store_false',
dest='use_goma',
default=True,
help='Do not use goma when building with ninja.')
build_group.add_argument('--clean',
action='store_true',
help='Do a clean build for each revision.')
build_group.add_argument('--gn-args',
default='',
help='Extra GN args to set.')
build_group.add_argument('--target-os',
default='android',
choices=['android', 'linux'],
help='target_os gn arg. Default: android.')
build_group.add_argument('--output-directory',
default=_DEFAULT_OUT_DIR,
help='ninja output directory. '
'Default: %s.' % _DEFAULT_OUT_DIR)
build_group.add_argument('--enable-chrome-android-internal',
action='store_true',
help='Allow downstream targets to be built.')
build_group.add_argument('--target',
help='GN target to build. Linux default: chrome. '
'Android default: monochrome_public_minimal_apks or '
'monochrome_minimal_apks (depending on '
'--enable-chrome-android-internal).')
if len(sys.argv) == 1:
parser.print_help()
return 1
args = parser.parse_args()
log_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(level=log_level,
format='%(levelname).1s %(relativeCreated)6d %(message)s')
build = _BuildHelper(args)
subrepo = args.subrepo or _SRC_ROOT
_EnsureDirectoryClean(subrepo)
_SetRestoreFunc(subrepo)
if build.IsLinux():
_VerifyUserAccepts('Linux diffs have known deficiencies (crbug/717550).')
reference_rev = args.reference_rev or args.rev + '^'
if args.single:
reference_rev = args.rev
_ValidateRevs(args.rev, reference_rev, subrepo, args.extra_rev)
revs = _GenerateRevList(args.rev, reference_rev, args.all, subrepo, args.step)
with _TmpCopyBinarySizeDir() as paths:
supersize_path, tool_prefix = paths
diffs = [NativeDiff(build.size_name, supersize_path)]
if build.IsAndroid():
diffs += [
ResourceSizesDiff(build.apk_name)
]
diff_mngr = _DiffArchiveManager(revs, args.archive_directory, diffs, build,
subrepo, args.include_slow_options,
args.unstripped)
consecutive_failures = 0
i = 0
for i, archive in enumerate(diff_mngr.build_archives):
if archive.Exists():
logging.info('Found matching metadata for %s, skipping build step.',
archive.rev)
else:
build_failure = _SyncAndBuild(archive, build, subrepo, args.no_gclient,
args.extra_rev)
if build_failure:
logging.info(
'Build failed for %s, diffs using this rev will be skipped.',
archive.rev)
consecutive_failures += 1
if len(diff_mngr.build_archives) <= 2:
_Die('Stopping due to build failure.')
elif consecutive_failures > _ALLOWED_CONSECUTIVE_FAILURES:
_Die('%d builds failed in a row, last failure was %s.',
consecutive_failures, archive.rev)
else:
archive.ArchiveBuildResults(supersize_path, tool_prefix)
consecutive_failures = 0
if i != 0:
diff_mngr.MaybeDiff(i - 1, i)
diff_mngr.GenerateHtmlReport(0, i)
diff_mngr.Summarize()
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -672,655,338,578,976,400 | 34.992593 | 80 | 0.62582 | false |
MarkMolina/moneypenny-bot | bittrex_playground.py | 1 | 24571 | import StringIO
import json
import logging
import random
import urllib
import urllib2
import time
import math
import re
import requests
# import requests_toolbelt.adapters.appengine
# Use the App Engine Requests adapter. This makes sure that Requests uses
# URLFetch.
# requests_toolbelt.adapters.appengine.monkeypatch()
# sending images
# try:
# from PIL import Image
# except:
# pass
# import multipart
#
# # standard app engineimports
# from google.appengine.api import urlfetch
# from google.appengine.ext import deferred
# from google.appengine.ext import ndb
# from google.appengine.api.taskqueue import TaskRetryOptions
# import webapp2
TOKEN = '363749995:AAEMaasMVLSPqSuSr1MiEFcgQH_Yn88hlbg'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
#urlfetch.set_default_fetch_deadline(60)
ALERTS = set()
#
# def deffered_track_pair_price(pair, current_price, target_price, chat_id, message_id):
# alert_key = (pair, target_price)
# logging.info("Checking price alert..{} if {}".format(pair, target_price))
# kraken = KrakenExchange()
# ticker = kraken.getTicker(pair=ASSETPAIRS[pair])
# askPrice = float(ticker['Ask Price'][0])
# bidPrice = float(ticker['Bid Price'][0])
# live_price = (askPrice + bidPrice) / 2
# target_price = float(target_price)
# if current_price < target_price and live_price >= target_price:
# ALERTS.remove(alert_key)
# reply_message(
# chat_id=chat_id,
# message_id=message_id,
# msg="{} just hit {}!".format(
# pair, live_price
# )
# )
# elif current_price > target_price and live_price <= target_price:
# ALERTS.remove(alert_key)
# reply_message(
# chat_id=chat_id,
# message_id=message_id,
# msg="{} just hit {}!".format(
# pair, live_price
# )
# )
# else:
# raise Exception("Alert not hit, fail task so it is retried")
#
#
# def track_pair_price(pair, current_price, target_price, chat_id, message_id):
# ALERTS.add(
# (pair, target_price)
# )
#
# deferred.defer(
# deffered_track_pair_price,
# pair, current_price, target_price, chat_id, message_id,
# _retry_options=TaskRetryOptions(
# min_backoff_seconds=60,
# task_age_limit=86400
# ) # 1 day
# )
#
#
# # ================================
#
# class EnableStatus(ndb.Model):
# # key name: str(chat_id)
# enabled = ndb.BooleanProperty(indexed=False, default=False)
#
#
# # ================================
#
# def setEnabled(chat_id, yes):
# es = EnableStatus.get_or_insert(str(chat_id))
# es.enabled = yes
# es.put()
#
# def getEnabled(chat_id):
# es = EnableStatus.get_by_id(str(chat_id))
# if es:
# return es.enabled
# return False
#
#
# # ================================
#
# class MeHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
#
#
# class GetUpdatesHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
#
#
# class SetWebhookHandler(webapp2.RequestHandler):
# def get(self):
# urlfetch.set_default_fetch_deadline(60)
# url = self.request.get('url')
# if url:
# self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
#
#
# def reply_message(chat_id, message_id, msg=None, img=None):
# if msg:
# resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
# 'chat_id': str(chat_id),
# 'text': msg.encode('utf-8'),
# 'disable_web_page_preview': 'true',
# 'reply_to_message_id': str(message_id),
# 'parse_mode': 'Markdown'
# })).read()
# elif img:
# resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
# ('chat_id', str(chat_id)),
# ('reply_to_message_id', str(message_id)),
# ], [
# ('photo', 'image.jpg', img),
# ])
# else:
# logging.error('no msg or img specified')
# resp = None
#
# logging.info('send response:')
# logging.info(resp)
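# NOTE: WebhookHandler below still relies on the App Engine pieces commented out
# above (webapp2, urlfetch, reply_message, getEnabled/setEnabled, Image and
# track_pair_price); re-enable those imports/helpers before deploying it.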
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
try:
message = body['message']
except:
message = body['edited_message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
def reply(msg=None, img=None):
reply_message(msg=msg, img=img, chat_id=chat_id, message_id=message_id)
if not text:
logging.info('no text')
return
if text.startswith('/'):
            text_kraken = re.sub(r'(\/btc)', '/xbt', text)
            text_kraken = re.sub(r'(btc$)', 'xbt', text_kraken)
            text_kraken = re.sub(r'(btc\s+)', 'xbt ', text_kraken)
if text == '/start':
reply('Bot enabled')
setEnabled(chat_id, True)
if text == '/alerts':
reply(
"*Alerts*\n{}".format(
"\n".join([
"{}: {}".format(pair, price)
for pair, price in ALERTS
])
)
)
elif text == '/stop':
reply('Bot disabled')
setEnabled(chat_id, False)
elif text == '/rules':
reply('1. You do not talk about WHALE HUNTERS \n2. You DO NOT talk about WHALE HUNTERS \n3. Master level of TA skills required \n3.141592 Bring pie \n4. Inactive members will be banned')
elif text == '/image':
img = Image.new('RGB', (512, 512))
base = random.randint(0, 16777216)
pixels = [base+i*j for i in range(512) for j in range(512)]
img.putdata(pixels)
output = StringIO.StringIO()
img.save(output, 'JPEG')
reply(img=output.getvalue())
elif text == '/help' or text == '/options':
r = '/rules : show rules\n/image : generate an image\n/time(s) : get server time\n/assets : list of assets\n/pairs : list of all pairs (long)\n/<asset> : show this assets pairs\n/<assetpair> : show assetpairs price\n/alerts : show alerts'
reply(r)
elif text == '/time' or text == '/times':
time = KrakenExchange().getServerTime()['rfc1123']
r = 'Kraken server time: {}'.format(time)
reply(r)
elif text == '/assets':
r = 'Reply with /<asset> to get its pairs\n{}'.format(', '.join(ASSETS))
reply(r)
elif text == '/pairs':
assets = ASSETPAIRS.keys()
assets.sort()
r = 'Reply with /<assetpair> to get bid/ask prices\n{}'.format(', '.join(assets))
reply(r)
elif text[1:].upper() in ASSETS:
pairs = []
for pair in ASSETPAIRS:
if pair[:3] == text[1:].upper()[:3]:
pairs.append(pair)
r = 'Reply with /<assetpair> to get bid/ask prices\n{}'.format(', '.join(pairs))
reply(r)
elif text_kraken.split(' ')[0][1:].upper() in ASSETPAIRS.keys():
pair = text_kraken.split(' ')[0][1:].upper()
kraken = KrakenExchange()
ticker = kraken.getTicker(pair=ASSETPAIRS[pair])
askPrice = float(ticker['Ask Price'][0])
bidPrice = float(ticker['Bid Price'][0])
price = (askPrice + bidPrice) / 2
highPrice = float(ticker['High'][0])
lowPrice = float(ticker['Low'][0])
# time = kraken.serverTime['rfc1123']
r = ""
if len(text_kraken.split(' ')) > 1:
if text_kraken.split(' ')[1] == 'fib':
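                        # Illustrative arithmetic: with a 24h high of 8000 and a
                        # low of 6000, the 61.8% retracement is
                        # 8000 - (8000 - 6000) * 0.618 = 6764.0; the 127.2% and
                        # 161.8% extensions fall below the 24h low.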
l_one = highPrice
l_two = highPrice - ((highPrice - lowPrice) * 0.236)
l_three = highPrice - ((highPrice - lowPrice) * 0.382)
l_four = highPrice - ((highPrice - lowPrice) * 0.5)
l_five = highPrice - ((highPrice - lowPrice) * 0.618)
l_six = highPrice - ((highPrice - lowPrice) * 0.786)
l_seven = lowPrice
l_eight = highPrice - ((highPrice - lowPrice) * 1.272)
l_nine = highPrice - ((highPrice - lowPrice) * 1.618)
r = '*{0}* 24h fib levels\n\n*0%*: {1}\n*23.6%*: {2}\n*38.2%*: {3}\n*50%*: {4}\n*61.8%*: {5}\n*78.6%*: {6}\n*100%*: {7}\n\n*127.2%*: {8}\n*161.8%*: {9}\n'.format(pair, l_one, l_two, l_three, l_four, l_five, l_six, l_seven, l_eight, l_nine)
if text_kraken.split(' ')[1] == 'book':
order_book = kraken.getOrderBook(pair=ASSETPAIRS[pair])
book = order_book[ASSETPAIRS[pair]]
r = "*OrderBook* {0} \n*Asks*\n{1}\n\n*Bids*\n{2}".format(
pair,
"\n".join(
["{} {}".format(ask[0], ask[1]) for ask in book['asks'][:10]]
),
"\n".join(
["{} {}".format(bid[0], bid[1]) for bid in book['bids'][:10]]
),
)
if text_kraken.split(' ')[1] == 'alert':
try:
target_price = text_kraken.split(' ')[2]
track_pair_price(pair, price, target_price, chat_id, message_id)
r = 'You want me to keep an eye on your {}? I will let you know if it rises or drops to {}'.format(
pair, target_price
)
logging.info(r)
except IndexError:
r = 'Tell me what price you want an alert for, doofus!'
else:
r = '*{}* \n*Price:* {} \n*---* \n*High:* {} \n*Low:* {}'.format(pair, price, highPrice, lowPrice)
# r += '\n\n_updated: {}_'.format(time)
reply(r)
elif text.split(' ')[0][1:].upper() in BITT_ASSETPAIRS:
# TODO: insert bittrex methods here
pair = text.split(' ')[0][1:]
bittrex = BittrexExchange()
ticker = bittrex.getTicker(pair=pair)
askPrice = float(ticker['Ask Price'])
bidPrice = float(ticker['Bid Price'])
price = (askPrice + bidPrice) / 2
highPrice = float(ticker['High'])
lowPrice = float(ticker['Low'])
r = ""
if len(text.split(' ')) > 1:
if text.split(' ')[1] == 'fib':
l_one = highPrice
l_two = highPrice - ((highPrice - lowPrice) * 0.236)
l_three = highPrice - ((highPrice - lowPrice) * 0.382)
l_four = highPrice - ((highPrice - lowPrice) * 0.5)
l_five = highPrice - ((highPrice - lowPrice) * 0.618)
l_six = highPrice - ((highPrice - lowPrice) * 0.786)
l_seven = lowPrice
l_eight = highPrice - ((highPrice - lowPrice) * 1.272)
l_nine = highPrice - ((highPrice - lowPrice) * 1.618)
r = '*{0}* 24h fib levels\n\n*0%*: {1}\n*23.6%*: {2}\n*38.2%*: {3}\n*50%*: {4}\n*61.8%*: {5}\n*78.6%*: {6}\n*100%*: {7}\n\n*127.2%*: {8}\n*161.8%*: {9}\n'.format(pair, l_one, l_two, l_three, l_four, l_five, l_six, l_seven, l_eight, l_nine)
else:
r = '*{}* \n*Price:* {} \n*---* \n*High:* {} \n*Low:* {}'.format(pair, price, highPrice, lowPrice)
reply(r)
elif len(text) == 4 or len(text) == 7:
reply('This asset(pair) is not recognized. Pick one from the /assets list, stupid.')
else:
reply('You know, this sort of behaviour could qualify as sexual harassment.')
# bot text reply's
elif 'beach' in text:
reply('dont forget to bring a towel')
# elif ('sell' in text or 'dropping' in text or 'dumping' in text) and random.choice([True, False]):
# reply('weak hands!')
# elif 'what time' in text:
# reply('look at the corner of your screen!')
# elif 'moon' in text:
# reply('http://www.louwmanexclusive.com/nl/brands/lamborghini/')
# elif 'bitch' in text:
# reply('dont talk to me like that!')
# elif 'penny' in text:
# reply('Dont talk behind my back!')
else:
if getEnabled(chat_id):
reply('I got your message! (but I do not know how to answer)')
else:
logging.info('not enabled for chat_id {}'.format(chat_id))
# ===== Kraken Exchange methods & classes ======
PUBLIC_URLS = {
'time': 'https://api.kraken.com/0/public/Time',
'assets': 'https://api.kraken.com/0/public/Assets',
'assetPairs': 'https://api.kraken.com/0/public/AssetPairs',
'ticker': 'https://api.kraken.com/0/public/Ticker',
'ohlc': 'https://api.kraken.com/0/public/OHLC',
'orderBook': 'https://api.kraken.com/0/public/Depth',
'recentTrades': 'https://api.kraken.com/0/public/Trades',
'spread': 'https://api.kraken.com/0/public/Spread',
}
TICKER_MAPPING = {
'a': 'Ask Price',
'b': 'Bid Price',
'c': 'Last Trade',
'v': 'Volume',
'p': 'Volume weighted avg',
't': '# Trades',
'l': 'Low',
'h': 'High',
'o': 'Opening Price',
}
ASSETS = ['DASH', 'EOS', 'ETC', 'ETH', 'GNO', 'ICN', 'LTC', 'MLN', 'REP', 'USDT',
'XBT', 'XDG', 'XLM', 'XMR', 'XRP', 'ZEC', 'BCH']
ASSETPAIRS = {
'DASHEUR': 'DASHEUR',
'DASHUSD': 'DASHUSD',
'DASHXBT': 'DASHXBT',
'EOSETH': 'EOSETH',
'EOSEUR': 'EOSEUR',
'EOSUSD': 'EOSUSD',
'EOSXBT': 'EOSXBT',
'ETCETH': 'XETCXETH',
'ETCEUR': 'XETCZEUR',
'ETCUSD': 'XETCZUSD',
'ETCXBT': 'XETCXXBT',
'ETHCAD': 'XETHZCAD',
'ETHEUR': 'XETHZEUR',
'ETHGBP': 'XETHZGBP',
'ETHJPY': 'XETHZJPY',
'ETHUSD': 'XETHZUSD',
'ETHXBT': 'XETHXXBT',
'GNOETH': 'GNOETH',
'GNOEUR': 'GNOEUR',
'GNOUSD': 'GNOUSD',
'GNOXBT': 'GNOXBT',
'ICNETH': 'XICNXETH',
'ICNXBT': 'XICNXXBT',
'LTCEUR': 'XLTCZEUR',
'LTCUSD': 'XLTCZUSD',
'LTCXBT': 'XLTCXXBT',
'MLNETH': 'XMLNXETH',
'MLNXBT': 'XMLNXXBT',
'REPETH': 'XREPXETH',
'REPEUR': 'XREPZEUR',
'REPUSD': 'XREPZUSD',
'REPXBT': 'XREPXXBT',
'USDTUSD': 'USDTZUSD',
'XBTCAD': 'XXBTZCAD',
'XBTEUR': 'XXBTZEUR',
'XBTGBP': 'XXBTZGBP',
'XBTJPY': 'XXBTZJPY',
'XBTUSD': 'XXBTZUSD',
'XDGXBT': 'XXDGXXBT',
'XLMEUR': 'XXLMZEUR',
'XLMUSD': 'XXLMZUSD',
'XLMXBT': 'XXLMXXBT',
'XMREUR': 'XXMRZEUR',
'XMRUSD': 'XXMRZUSD',
'XMRXBT': 'XXMRXXBT',
'XRPCAD': 'XXRPZCAD',
'XRPEUR': 'XXRPZEUR',
'XRPJPY': 'XXRPZJPY',
'XRPUSD': 'XXRPZUSD',
'XRPXBT': 'XXRPXXBT',
'ZECEUR': 'XZECZEUR',
'ZECUSD': 'XZECZUSD',
'ZECXBT': 'XZECXXBT',
'BCHEUR': 'BCHEUR',
'BCHUSD': 'BCHUSD',
'BCHXBT': 'BCHXBT',
}
MAXREQUESTS = 15
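# Shared HTTP helper: POSTs `header` as form data and returns the parsed
# 'result' field of the JSON response, or None when the status is not 200.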
def _query(url, header):
r = requests.post(url, data=header)
if r.status_code == 200:
return json.loads(r.text)['result']
class KrakenExchange(object):
"""
Holds all methods for fetching Assets, Assetpairs and current Ticker
values from the Kraken Exchange.
Time Skew can be displayed by requesting server time.
"""
def __init__(self):
super(KrakenExchange, self).__init__()
def query_public(self, type, header=None):
return _query(PUBLIC_URLS[type], header)
def getServerTime(self):
serverTime = self.query_public('time')
if type(serverTime) == ValueError:
return serverTime.message
self.serverTime = serverTime
return self.serverTime
def getServerSkew(self):
self.serverSkew = time.time() - self.getServerTime()['unixtime']
return self.serverSkew
def getOrderBook(self, pair):
header = dict(
pair=pair,
count=10,
)
r = self.query_public('orderBook', header)
return r
def getTicker(self, pair):
header = {'pair': pair} if pair else None
r = self.query_public('ticker', header)
if type(r) == ValueError:
return r.message
self.ticker = {}
ticker = r[pair]
for t in ticker.keys():
self.ticker[TICKER_MAPPING[t]] = ticker[t]
return self.ticker
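        # Illustrative usage (values depend on live Kraken data):
        #   kraken = KrakenExchange()
        #   ticker = kraken.getTicker(pair=ASSETPAIRS['XBTEUR'])  # 'XXBTZEUR'
        #   mid = (float(ticker['Ask Price'][0]) + float(ticker['Bid Price'][0])) / 2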
# ===== Bittrex Exchange methods & classes ======
BITT_PUBLIC_URLS = {
# hold open markets, assets and pairs.
'markets': 'https://bittrex.com/api/v1.1/public/getmarkets',
'currencies': 'https://bittrex.com/api/v1.1/public/getcurrencies ',
# Just the current price and bid ask.
'ticker': 'https://bittrex.com/api/v1.1/public/getticker',
# > 1 market 24h summary, current high-low etc
'summary': 'https://bittrex.com/api/v1.1/public/getmarketsummary',
# > 1 market 24h summary, current high-low etc
'summaries': 'https://bittrex.com/api/v1.1/public/getmarketsummaries',
'orderBook': 'https://bittrex.com/api/v1.1/public/getorderbook',
'history': 'https://bittrex.com/api/v1.1/public/getmarkethistory'
}
BITT_TICKER_MAPPING = {
'MarketName': 'Pair',
'High': 'High',
'Low': 'Low',
'Volume': 'Volume',
'Last': 'Last',
'BaseVolume': 'Base Volume',
'Bid': 'Bid Price',
'Ask': 'Ask Price',
'OpenBuyOrders': '# Buy Orders',
'OpenSellOrders': '# Sell Orders'
}
BITT_ASSETPAIRS = [
u'BTC-LTC',
u'BTC-DOGE',
u'BTC-VTC',
u'BTC-PPC',
u'BTC-FTC',
u'BTC-RDD',
u'BTC-NXT',
u'BTC-DASH',
u'BTC-POT',
u'BTC-BLK',
u'BTC-EMC2',
u'BTC-XMY',
u'BTC-AUR',
u'BTC-EFL',
u'BTC-GLD',
u'BTC-SLR',
u'BTC-PTC',
u'BTC-GRS',
u'BTC-NLG',
u'BTC-RBY',
u'BTC-XWC',
u'BTC-MONA',
u'BTC-THC',
u'BTC-ENRG',
u'BTC-ERC',
u'BTC-NAUT',
u'BTC-VRC',
u'BTC-CURE',
u'BTC-XBB',
u'BTC-XMR',
u'BTC-CLOAK',
u'BTC-START',
u'BTC-KORE',
u'BTC-XDN',
u'BTC-TRUST',
u'BTC-NAV',
u'BTC-XST',
u'BTC-BTCD',
u'BTC-VIA',
u'BTC-UNO',
u'BTC-PINK',
u'BTC-IOC',
u'BTC-CANN',
u'BTC-SYS',
u'BTC-NEOS',
u'BTC-DGB',
u'BTC-BURST',
u'BTC-EXCL',
u'BTC-SWIFT',
u'BTC-DOPE',
u'BTC-BLOCK',
u'BTC-ABY',
u'BTC-BYC',
u'BTC-XMG',
u'BTC-BLITZ',
u'BTC-BAY',
u'BTC-BTS',
u'BTC-FAIR',
u'BTC-SPR',
u'BTC-VTR',
u'BTC-XRP',
u'BTC-GAME',
u'BTC-COVAL',
u'BTC-NXS',
u'BTC-XCP',
u'BTC-BITB',
u'BTC-GEO',
u'BTC-FLDC',
u'BTC-GRC',
u'BTC-FLO',
u'BTC-NBT',
u'BTC-MUE',
u'BTC-XEM',
u'BTC-CLAM',
u'BTC-DMD',
u'BTC-GAM',
u'BTC-SPHR',
u'BTC-OK',
u'BTC-SNRG',
u'BTC-PKB',
u'BTC-CPC',
u'BTC-AEON',
u'BTC-ETH',
u'BTC-GCR',
u'BTC-TX',
u'BTC-BCY',
u'BTC-EXP',
u'BTC-INFX',
u'BTC-OMNI',
u'BTC-AMP',
u'BTC-AGRS',
u'BTC-XLM',
u'BTC-BTA',
u'USDT-BTC',
u'BITCNY-BTC',
u'BTC-CLUB',
u'BTC-VOX',
u'BTC-EMC',
u'BTC-FCT',
u'BTC-MAID',
u'BTC-EGC',
u'BTC-SLS',
u'BTC-RADS',
u'BTC-DCR',
u'BTC-SAFEX',
u'BTC-BSD',
u'BTC-XVG',
u'BTC-PIVX',
u'BTC-XVC',
u'BTC-MEME',
u'BTC-STEEM',
u'BTC-2GIVE',
u'BTC-LSK',
u'BTC-PDC',
u'BTC-BRK',
u'BTC-DGD',
u'ETH-DGD',
u'BTC-WAVES',
u'BTC-RISE',
u'BTC-LBC',
u'BTC-SBD',
u'BTC-BRX',
u'BTC-DRACO',
u'BTC-ETC',
u'ETH-ETC',
u'BTC-STRAT',
u'BTC-UNB',
u'BTC-SYNX',
u'BTC-TRIG',
u'BTC-EBST',
u'BTC-VRM',
u'BTC-SEQ',
u'BTC-XAUR',
u'BTC-SNGLS',
u'BTC-REP',
u'BTC-SHIFT',
u'BTC-ARDR',
u'BTC-XZC',
u'BTC-NEO',
u'BTC-ZEC',
u'BTC-ZCL',
u'BTC-IOP',
u'BTC-DAR',
u'BTC-GOLOS',
u'BTC-HKG',
u'BTC-UBQ',
u'BTC-KMD',
u'BTC-GBG',
u'BTC-SIB',
u'BTC-ION',
u'BTC-LMC',
u'BTC-QWARK',
u'BTC-CRW',
u'BTC-SWT',
u'BTC-TIME',
u'BTC-MLN',
u'BTC-ARK',
u'BTC-DYN',
u'BTC-TKS',
u'BTC-MUSIC',
u'BTC-DTB',
u'BTC-INCNT',
u'BTC-GBYTE',
u'BTC-GNT',
u'BTC-NXC',
u'BTC-EDG',
u'BTC-LGD',
u'BTC-TRST',
u'ETH-GNT',
u'ETH-REP',
u'USDT-ETH',
u'ETH-WINGS',
u'BTC-WINGS',
u'BTC-RLC',
u'BTC-GNO',
u'BTC-GUP',
u'BTC-LUN',
u'ETH-GUP',
u'ETH-RLC',
u'ETH-LUN',
u'ETH-SNGLS',
u'ETH-GNO',
u'BTC-APX',
u'BTC-TKN',
u'ETH-TKN',
u'BTC-HMQ',
u'ETH-HMQ',
u'BTC-ANT',
u'ETH-TRST',
u'ETH-ANT',
u'BTC-SC',
u'ETH-BAT',
u'BTC-BAT',
u'BTC-ZEN',
u'BTC-1ST',
u'BTC-QRL',
u'ETH-1ST',
u'ETH-QRL',
u'BTC-CRB',
u'ETH-CRB',
u'ETH-LGD',
u'BTC-PTOY',
u'ETH-PTOY',
u'BTC-MYST',
u'ETH-MYST',
u'BTC-CFI',
u'ETH-CFI',
u'BTC-BNT',
u'ETH-BNT',
u'BTC-NMR',
u'ETH-NMR',
u'ETH-TIME',
u'ETH-LTC',
u'ETH-XRP',
u'BTC-SNT',
u'ETH-SNT',
u'BTC-DCT',
u'BTC-XEL',
u'BTC-MCO',
u'ETH-MCO',
u'BTC-ADT',
u'ETH-ADT',
u'BTC-FUN',
u'ETH-FUN',
u'BTC-PAY',
u'ETH-PAY',
u'BTC-MTL',
u'ETH-MTL',
u'BTC-STORJ',
u'ETH-STORJ',
u'BTC-ADX',
u'ETH-ADX',
u'ETH-DASH',
u'ETH-SC',
u'ETH-ZEC',
u'USDT-ZEC',
u'USDT-LTC',
u'USDT-ETC',
u'USDT-XRP',
u'BTC-OMG',
u'ETH-OMG',
u'BTC-CVC',
u'ETH-CVC',
u'BTC-PART',
u'BTC-QTUM',
u'ETH-QTUM',
u'ETH-XMR',
u'ETH-XEM',
u'ETH-XLM',
u'ETH-NEO',
u'USDT-XMR',
u'USDT-DASH',
u'ETH-BCC',
u'USDT-BCC',
u'BTC-BCC',
u'USDT-NEO',
u'ETH-WAVES',
u'ETH-STRAT',
u'ETH-DGB',
u'ETH-FCT',
u'ETH-BTS']
# TODO: retrieve all pairs from the `getmarket` data. Pairs will have "-"
# which will be handy for separation.
class BittrexExchange(object):
"""
    Holds all methods for fetching assets, asset pairs, the current ticker,
    24h market summaries, order books and market history from the Bittrex
    Exchange public API.
"""
def __init__(self):
super(BittrexExchange, self).__init__()
def query_public(self, type, header=None):
return _query(BITT_PUBLIC_URLS[type], header)
def getTicker(self, pair):
header = {'market': pair} if pair else None
r = self.query_public('summary', header)
if type(r) == ValueError:
return r.message
self.ticker = {}
ticker = r[0]
# print(ticker)
for t in ticker.keys():
if t in BITT_TICKER_MAPPING.keys():
self.ticker[BITT_TICKER_MAPPING[t]] = ticker[t]
return self.ticker
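        # Illustrative usage (values depend on live Bittrex data):
        #   bittrex = BittrexExchange()
        #   ticker = bittrex.getTicker(pair='BTC-ETH')
        #   mid = (float(ticker['Ask Price']) + float(ticker['Bid Price'])) / 2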
def getmarkets(self, type, header=None):
header = None
r = self.query_public('markets', header)
self.markets = []
markets = r
for i, cont in enumerate(markets):
self.markets.append(markets[i]["MarketName"])
return self.markets
| mit | -872,781,368,876,894,800 | 29.148466 | 263 | 0.503602 | false |
AntonelliLab/seqcap_processor | bin/aTRAM-master/tests/lib/test_core_atram.py | 1 | 5511 | """Testing functions in core_atram."""
# pylint: disable=too-many-arguments,unused-variable
from os.path import join
from unittest.mock import patch, MagicMock, call
import tempfile
import lib.core_atram as core_atram
from lib.assemblers.base import BaseAssembler
def set_up():
"""Build a generic assembler."""
cxn = 'cxn'
args = {
'query': ['query_file_1', 'query_file_2'],
'blast_db': ['blast_db_1', 'blast_db_2'],
'iterations': 1,
'log_file': 'log_file_1',
'log_level': 'info',
'temp_dir': 'temp_dir_1'}
assembler = BaseAssembler(args, cxn)
return args, cxn, assembler
@patch('lib.core_atram.write_query_seq')
def test_split_queries_01(write_query_seq):
"""Test split queries where there are no fasta files to split."""
args, cxn, _ = set_up()
args['query_split'] = []
queries = core_atram.split_queries(args)
write_query_seq.assert_not_called()
assert args['query'] == queries
@patch('lib.core_atram.write_query_seq')
def test_split_queries_02(write_query_seq):
"""Test split queries where there are fasta files to split."""
args, cxn, assembler = set_up()
args['query_split'] = ['tests/data/split_queries1.txt']
args['protein'] = True
with tempfile.TemporaryDirectory(prefix='test_') as temp_dir:
args['temp_dir'] = temp_dir
queries = core_atram.split_queries(args)
split_files = [
join(temp_dir, 'queries', 'split_queries1_seq1_1_1.fasta'),
join(temp_dir, 'queries', 'split_queries1_seq2_2_2_2.fasta'),
join(temp_dir, 'queries', 'split_queries1_seq3_3.fasta'),
join(temp_dir, 'queries', 'split_queries1_seq1_1_4.fasta')]
calls = [
call(split_files[0], 'seq1/1', 'A' * 10),
call(split_files[1], 'seq2:2/2', 'C' * 20),
call(split_files[2], 'seq3', 'G' * 30),
call(split_files[3], 'seq1+1', 'T' * 10)]
write_query_seq.assert_has_calls(calls)
assert split_files == queries
def test_write_query_seq_01():
"""It writes a sequence to a fasta file."""
args, cxn, assembler = set_up()
with tempfile.TemporaryDirectory(prefix='test_') as temp_dir:
path = join(temp_dir, 'test_query.fasta')
core_atram.write_query_seq(
path,
'my sequence name',
'aaaacccgggtt')
with open(path) as test_file:
expect = (
'>my sequence name\n'
'aaaacccgggtt\n')
assert expect == test_file.read()
@patch('lib.db_atram.create_sra_blast_hits_table')
@patch('lib.db_atram.create_contig_blast_hits_table')
@patch('lib.db_atram.create_assembled_contigs_table')
def test_clean_database_01(
create_assembled_contigs_table,
create_contig_blast_hits_table,
create_sra_blast_hits_table):
"""It runs the clean_database function."""
args, cxn, assembler = set_up()
dbh = 'my_db'
core_atram.clean_database(dbh)
create_assembled_contigs_table.assert_called_once_with(dbh)
create_contig_blast_hits_table.assert_called_once_with(dbh)
create_sra_blast_hits_table.assert_called_once_with(dbh)
@patch('lib.core_atram.blast_query_against_all_shards')
@patch('lib.core_atram.create_query_from_contigs')
@patch('lib.core_atram.filter_contigs')
def test_assembly_loop_iteration_01(
filter_contigs,
create_query_from_contigs,
blast_query_against_all_shards):
"""It iterates over the assembly processes."""
args, _, assembler = set_up()
temp_dir = 'my_temp_dir'
assembler.blast_only = False
assembler.state['query_file'] = args['query'][0]
assembler.state['blast_db'] = args['blast_db'][0]
assembler.state['iter_dir'] = 'my_iter_dir'
assembler.init_iteration = MagicMock()
assembler.count_blast_hits = MagicMock(return_value=1)
assembler.write_input_files = MagicMock()
assembler.run = MagicMock()
assembler.nothing_assembled = MagicMock(return_value=False)
assembler.assembled_contigs_count = MagicMock(return_value=11)
assembler.no_new_contigs = MagicMock(return_value=False)
core_atram.assembly_loop_iteration(args, assembler)
blast_query_against_all_shards.assert_called_once_with(assembler)
assert assembler.count_blast_hits.call_count == 1
assembler.no_new_contigs.assert_called_once_with(11)
create_query_from_contigs.create_query_from_contigs(assembler)
filter_contigs.create_query_from_contigs(assembler)
@patch('lib.blast.all_shard_paths')
def test_shard_fraction_01(all_shard_paths):
"""It gets the shards we are using when there is no split."""
args, cxn, assembler = set_up()
returns = ['1st', '2nd', '3rd', '4th']
assembler.state['blast_db'] = args['blast_db'][0]
assembler.args['fraction'] = 1.0
all_shard_paths.return_value = returns
shards = core_atram.shard_fraction(assembler)
assert returns == shards
all_shard_paths.assert_called_once_with(args['blast_db'][0])
@patch('lib.blast.all_shard_paths')
def test_shard_fraction_02(all_shard_paths):
"""It gets the shards we are using when there is a split."""
args, cxn, assembler = set_up()
assembler.args['fraction'] = 0.5
assembler.state['blast_db'] = args['blast_db'][0]
returns = ['1st', '2nd', '3rd', '4th']
all_shard_paths.return_value = returns
shards = core_atram.shard_fraction(assembler)
assert ['1st', '2nd'] == shards
all_shard_paths.assert_called_once_with(args['blast_db'][0])
| mit | 3,588,765,181,514,383,000 | 32 | 69 | 0.648521 | false |
alivecor/tensorflow | tensorflow/python/ops/array_ops.py | 1 | 82629 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for manipulating tensors.
See the @{$python/array_ops} guide.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@bitcast
@@saturate_cast
@@broadcast_dynamic_shape
@@broadcast_static_shape
@@shape
@@shape_n
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
@@meshgrid
@@slice
@@strided_slice
@@split
@@tile
@@pad
@@concat
@@stack
@@parallel_stack
@@unstack
@@reverse_sequence
@@reverse
@@reverse_v2
@@transpose
@@extract_image_patches
@@space_to_batch_nd
@@space_to_batch
@@required_space_to_batch_paddings
@@batch_to_space_nd
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@unique_with_counts
@@scatter_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
@@sequence_mask
@@dequantize
@@quantize_v2
@@quantized_concat
@@setdiff1d
@@fake_quant_with_min_max_args
@@fake_quant_with_min_max_args_gradient
@@fake_quant_with_min_max_vars
@@fake_quant_with_min_max_vars_gradient
@@fake_quant_with_min_max_vars_per_channel
@@fake_quant_with_min_max_vars_per_channel_gradient
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.util import deprecation
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if context.in_graph_mode():
return gen_array_ops.identity(input, name=name)
else:
if context.context().device_name != input.device:
return input._copy() # pylint: disable=protected-access
return input
# pylint: disable=redefined-builtin,protected-access
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor`.
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if both `dim` and `axis` are specified.
"""
# TODO(aselle): Remove argument dim
if dim is not None:
if axis is not None:
raise ValueError("can't specify both 'dim' and 'axis'")
axis = dim
return gen_array_ops._expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated(
"2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops._list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops._list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable,protected-access
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops._list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops._list_diff.__doc__
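# A minimal usage sketch for setdiff1d (illustrative values only):
#   out, idx = setdiff1d([1, 2, 3, 4, 5, 6], [1, 3, 5])
#   out  # [2, 4, 6]
#   idx  # [1, 3, 5]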
# pylint: enable=protected-access
def broadcast_dynamic_shape(shape_x, shape_y):
# pylint: disable=protected-access
"""Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops._broadcast_args(shape_x, shape_y)
# pylint: enable=protected-access
def broadcast_static_shape(shape_x, shape_y):
"""Returns the broadcasted static shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
This operation returns an integer representing the number of elements in
`input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`. Defaults to tf.int32.
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops._prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
def _one_like_dtype(other):
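  """Returns a scalar 1 with the same dtype as `other`."""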
if isinstance(other, ops.Tensor):
return constant(1, other.dtype)
else:
return np.ones_like(other).dtype.type(1)
def _SliceHelper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
  The notation is similar to NumPy with the restriction that
  currently only basic indexing is supported. That means that
  using a tensor as input is not currently allowed.
Some useful examples:
```python
# strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # [3,4]
  # skip every other row and reverse every column
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # [[3,2,1], [9,8,7]]
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
  print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
                                      #     [[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # [[[1,2,3], [4,5,6], [7,8,9]]]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
  - An implicit ellipsis is placed at the end of the `slice_spec`.
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable
object to slice (i.e. tensor is the read-only view of this
variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _baseslice):
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and s.start is not sys.maxsize:
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
strides.append(s.step)
else:
# Use a 1 of the same dtype as begin.
strides.append(_one_like_dtype(begin[-1]))
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
begin.append(s)
end.append(s + 1)
strides.append(_one_like_dtype(s))
shrink_axis_mask |= (1 << index)
index += 1
  # stack possibly involves no tensors, so we must use op_scope to get the
  # correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
  `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Most users will want to use @{tf.Tensor.__getitem__} and
@{tf.Variable.__getitem__}.** That allows NumPy style slicing syntax (i.e.
`tensor[..., 3:4:-1, tf.newaxis, 3]`).
  This op is the low-level interface that is used to implement those operators.
  Those interfaces are much more friendly and are highly recommended.
To a first order, this operation extracts a slice of size `end - begin`
from a tensor `input`
starting at the location specified by `begin`. The slice continues by adding
`stride` to the `begin` index until all dimensions are not less than `end`.
Note that components of stride can be negative, which causes a reverse
slice.
  This operation can be thought of as an encoding of a numpy style sliced
  range. Given a python slice `input[<spec0>, <spec1>, ..., <specn>]`,
  this function will be called as follows.
  `begin`, `end`, and `strides` will all be of length n. n is in general
  not the same dimensionality as `input`.
For the ith spec,
`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`,
and `shrink_axis_mask` will have the ith bit corresponding to
the ith spec.
If the ith bit of `begin_mask` is non-zero, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is non-zero, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is one, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
  For example `foo[3:5,4]` on a 10x8 tensor produces a shape 2 tensor
  (with `shrink_axis_mask` being 1<<1 == 2), whereas `foo[3:5,4:5]`
  produces a shape 2x1 tensor.
If the ith bit of `shrink_axis_mask` is one, then `begin`,
`end[i]`, and `stride[i]` are used to do a slice in the appropriate
dimension, but the output tensor will be reduced in dimensionality
  by one. This is only valid when the ith slice selects exactly one element.
  NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
if context.in_graph_mode():
# TODO(apassos) In eager mode assignment will be done by overriding
# __setitem__ instead.
op.assign = assign
return op
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
  of a variable. See @{tf.Tensor$`Tensor.__getitem__`}
for detailed examples of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
    The appropriate slice of "tensor", based on "slice_spec", as an
    operator. The operator also has an `assign()` method that can be
    used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
return _SliceHelper(var._AsTensor(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
available, in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
  This is the opposite of unstack. The numpy equivalent is
  `tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])`.
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops._parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.asarray([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
if value_shape.ndims is not None:
expanded_num_dims = value_shape.ndims + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" % (axis, -expanded_num_dims,
expanded_num_dims))
return gen_array_ops._pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype,
elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops._pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is not None and dtype != inferred_dtype:
return NotImplemented
return _autopacking_helper(v, inferred_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
  This is the opposite of stack. The numpy equivalent is
  `tf.unstack(x, n) = list(x)`.
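  For example (a minimal sketch with illustrative values):
  ```python
  x = tf.constant([[1, 2, 3], [4, 5, 6]])
  a, b = tf.unstack(x)             # a is [1, 2, 3], b is [4, 5, 6]
  p, q, r = tf.unstack(x, axis=1)  # p is [1, 4], q is [2, 5], r is [3, 6]
  ```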
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first
dimension. Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) # [4, 3]
tf.shape(tf.concat([t3, t4], 1)) # [2, 6]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_is_compatible_with(
tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops._concat_v2(values=values, axis=axis, name=name)
def boolean_mask(tensor, mask, name="boolean_mask"):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), squeeze_dims=[1])
return gather(reshaped_tensor, indices)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops._prod(shape(tensor)[:ndims_mask], [0])
tensor = reshape(tensor,
concat([[leading_size],
shape(tensor)[ndims_mask:]], 0))
first_dim = shape_tensor[:ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape([first_dim])
.concatenate(shape_tensor[ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask)
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = setdiff1d(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer type, `num_split`, then splits `value`
along dimension `axis` into `num_split` smaller tensors.
Requires that `num_split` evenly divides `value.shape[axis]`.
If `num_or_size_splits` is not an integer type, it is presumed to be a Tensor
`size_splits`, then splits `value` into `len(size_splits)` pieces. The shape
of the `i`-th piece has the same size as the `value` except along dimension
`axis` where the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either a 0-D integer `Tensor` indicating the number of
      splits along split_dim or a 1-D integer `Tensor` containing
the sizes of each output tensor along split_dim. If a scalar then it must
evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
split dimension must match that of the `value`.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
    `num_or_size_splits.get_shape()[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if size_splits.get_shape().ndims == 0 and size_splits.dtype.is_integer:
return gen_array_ops._split(
split_dim=axis, num_split=num_or_size_splits, value=value, name=name)
else:
if num is None:
size_splits_shape = size_splits.get_shape()
num = size_splits_shape.dims[0]
if num._value is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops._split_v(
value=value,
size_splits=size_splits,
split_dim=axis,
num_split=num,
name=name)
def transpose(a, perm=None, name="transpose"):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if perm is None:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
ret = gen_array_ops.transpose(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
if context.in_graph_mode():
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = gen_array_ops.transpose(a, perm, name=name)
return ret
# pylint: disable=invalid-name
def matrix_transpose(a, name="matrix_transpose"):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.matrix_transpose(b))
```
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat((gen_math_ops._range(0, a_rank - 2, 1),
[a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm)
# pylint: enable=invalid-name
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
try:
shape = tensor_shape.as_shape(shape)
output = constant(zero, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
    # For now, variant types must be created via zeros_like, as we need to
    # pass the input variant object to the proper zeros callback.
if tensor.shape.is_fully_defined() and tensor.dtype != dtypes.variant:
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops._zeros_like(tensor, name=name)
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, `complex128` or
`bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if context.in_graph_mode():
ret.set_shape(tensor.get_shape())
return ret
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
try:
shape = tensor_shape.as_shape(shape)
output = constant(one, shape=shape, dtype=dtype, name=name)
except (TypeError, ValueError):
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64, shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) has passed
# remove the "Pad" fallback here.
if constant_values != 0:
result = gen_array_ops._pad_v2(
tensor, paddings, constant_values, name=name)
else:
result = gen_array_ops._pad(tensor, paddings, name=name)
elif mode == "REFLECT":
result = gen_array_ops._mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops._mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if context.in_graph_mode():
paddings_constant = tensor_util.constant_value(
result.op.inputs[1], partial=True)
input_shape = result.op.inputs[0].shape
if (input_shape.ndims is not None and not result.shape.is_fully_defined()
and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or not all(padding):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
Args:
*args: `Tensor`s with rank 1.
indexing: Either 'xy' or 'ij' (optional, default: 'xy').
name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO: improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
  unknown = None  # Returned when the size cannot be computed statically.
  use_full_range = None  # A missing slice endpoint means "use the full range".
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
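# A worked example for _compute_size_of_strided_dim (hypothetical values):
# with shrink=False, spec=slice(1, 7, 2) and a fully-known size of 10, the
# valid range is [0, 10], begin=1, end=7, and the result is
# (7 - 1) // 2 = 3 output elements.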
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"]
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
       [1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(truth, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops._edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
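  For example (a minimal sketch with illustrative values):
  ```python
  paddings, crops = tf.required_space_to_batch_paddings([4, 5], [2, 3])
  # paddings == [[0, 0], [0, 1]], crops == [[0, 0], [0, 1]]
  ```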
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
Raises: ValueError if called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape()[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack(
[[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
return result_paddings, result_crops
def space_to_batch(input, paddings, block_size, name=None): # pylint: disable=redefined-builtin
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops._space_to_batch.__doc__
def batch_to_space(input, crops, block_size, name=None): # pylint: disable=redefined-builtin
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops._batch_to_space.__doc__
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`.
  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`.
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
  If `indices` is a scalar, the output shape will be a vector of length `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If dtype of either `on_value` or `off_value` doesn't match `dtype`
    TypeError: If dtypes of `on_value` and `off_value` don't match one another
"""
with ops.name_scope(name, "one_hot",
[indices, depth, on_value, off_value, axis,
dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists \
else None
off_dtype = ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\
else None
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if (on_exists and on_dtype != dtype):
raise TypeError("dtype {0} of on_value does not match " \
"dtype parameter {1}".format(on_dtype, dtype))
if (off_exists and off_dtype != dtype):
raise TypeError("dtype {0} of off_value does not match " \
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match " \
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
name)
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Return a mask tensor representing the first N positions of each row.
Example:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
```
Args:
lengths: 1D integer tensor, all its values < maxlen.
maxlen: scalar integer tensor, maximum length of each row. Default: use
maximum over lengths.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A 2D mask tensor, as shown in the example above, cast to specified dtype.
Raises:
ValueError: if the arguments have invalid rank.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if lengths.get_shape().ndims != 1:
raise ValueError("lengths must be 1D for sequence_mask. Got shape %s" %
lengths.get_shape())
if maxlen is None:
maxlen = gen_math_ops._max(lengths, [0])
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen as the
    # authoritative cast type. Whenever maxlen fits into tf.int32, so do the
    # lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, 1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
Must be in the range `[-rank(input), rank(input))`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
if squeeze_dims is not None:
if axis is not None:
raise ValueError("Cannot specify both 'squeeze_dims' and 'axis'")
axis = squeeze_dims
if np.isscalar(axis):
axis = [axis]
return gen_array_ops._squeeze(input, axis, name)
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `x` and `y` must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
  If `x` and `y` are vectors or higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
A `Tensor` with shape `(num_true, dim_size(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
return gen_array_ops.where(input=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops._select(condition=condition, t=x, e=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
def reverse(tensor, axis, name=None):
return gen_array_ops.reverse_v2(tensor, axis, name)
reverse.__doc__ = gen_array_ops.reverse_v2.__doc__
# pylint: disable=redefined-builtin
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
# pylint: enable=redefined-builtin
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
def gather(params, indices, validate_indices=None, name=None, axis=0):
# TODO(rjryan): Remove "Gather" creation in favor of GatherV2 once the forward
# compatibility 3 week period has passed.
if axis == 0:
return gen_array_ops.gather(
params, indices, validate_indices=validate_indices, name=name)
else:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
gather.__doc__ = gen_array_ops.gather_v2.__doc__
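# Illustrative sketch (assumed values, not from the source): gathering along
# the default axis 0 versus an explicit axis,
#   gather([10, 20, 30], [2, 0])            # -> [30, 10]
#   gather([[1, 2], [3, 4]], [1], axis=1)   # -> [[2], [4]]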
| apache-2.0 | -9,138,453,467,312,795,000 | 32.864344 | 105 | 0.631098 | false |
tvgrabbers/tvgrabpyAPI | tvgrabpyAPI/texts/create_texts_nl.py | 1 | 43744 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pickle, io, os, sys
# If you would like to create a translation, do the following.
# - copy this file to a file with the two-letter code for that language replacing "en".
# - also fill in this two-letter code in the lang variable below
# - replace the text strings with your language version, but:
# - keep the '%' (%s, %d, etc) markers in place as they get replaced by things like the name of a file
#   - if there is an EOL '\n' at the end, leave it in place, but do not add your own
# - however in some situations you can spread the text over multiple lines
# - keep any indentations at the start
# - run this newly created script to create the language file for your own use
# - send us this newly created script and we will probably include it with the language file in the package.
# - check regularly whether you need to update the script, update the version and send us the updated version.
# There are a few special categories:
# - In texts[u'config'][u'help'] you should check that the output of the --help option does not exceed a width of 80
#     Otherwise use spaces and newlines for the layout.
# - In texts[u'config'][u'confighelp'] there are several groups followed by empty lines. If empty they are not printed,
# but you can use them if you need more space. e.g. 1 - 10, 11 - 16, 21 - 39, 41 - 52, 61 - 67, 71 - 77, 81 - 87, 91 - 139, ...
name = 'tv_grab_text'
version = (1, 0, 0)
lang = 'nl'
language = 'Nederlands'
def load_texts():
texts = {
u'config':{
u'error':{
-2: u'Het %se tekst bestand is geladen\n' % (language),
-1: u'Fout bij het maken van de bericht tekst! (%s, %s: %s)\n',
0: u'De bericht tekst (%s, %s: %s) is niet gevonden!\n',
1: u'Geen valide bron beschrijving voor %s gevonden. De bron wordt uitgeschakeld!\n',
2: u'Je kunt dit script niet als "root" draaien behalve met de --configure optie.\n' + \
'Wanneer je --configure als "root" draait, dan wordt de configuratie in\n' + \
'"/etc/tvgrabpyAPI/" geplaatst en als reserve configuratie gebruikt.\n',
3: u'Fout bij het bijwerken van de nieuwe configuratie.\n',
4: u'Verwijder ajb het oude configuratie bestand en draai opnieuw met de --configure flag.\n',
5: u'Het configuratie bestand %s is bijgewerkt!\n',
6: u'Controleer of je tevreden bent met de instellingen.\n',
7: u'Wanneer dit een nieuwe installatie is, activeer dan nu eerst de gewenste zenders!\n',
8: u'Het configuratiebestand: %s wordt aangemaakt\n',
9: u'Fout bij het maken van de nieuwe configuratie. Probeer de oude terug te zetten.\n',
10: u'Het configuratie bestand %s is aangemaakt!\n',
11: u'De opties in het configuratiebestand %s zijn bijgewerkt!\n',
12: u'Een offset %s hoger dan het maximum is belachelijk. We zetten het op %s',
13: u'We kunnen maximaal 14 dagen vooruit kijken. Resetting!\n',
14: u'De folder %s wordt aangemaakt,\n',
15: u'Er kan niet naar het uitvoer bestand: %s geschreven worden.\n',
16: u'Er is geen toegang tot de configuratie/log folder: %s\n',
17: u'Het logbestand: %s kan niet worden geopend\n',
18: u'Het configuratie bestand: %s wordt gebruikt\n',
19: u'Het alternatief configuratie bestand %s wordt geprobeerd.\n',
20: u'Er kan niet naar het cache bestand: %s geschreven worden.\n',
21: u'Fout bij de toegang tot de cache (folder): %s\n',
22: u'Alles wordt in snelle modus gezet\n',
23: u'De zender: %s wordt in snelle modus gezet\n',
24: u'Een maximale beschrijving van %d tekens wordt voor zender %s ingesteld\n',
25: u'Een maximale overlap van 0 betekent een overlap strategy van: "%s"\n',
26: u'Een maximale overlap van 0 betekent voor zender %s een overlap strategy van: "%s"\n',
27: u'Een maximale overlap van: %d wordt voor zender %s gebruikt.\n',
28: u'overlap strategy voor zender: %s is ingesteld op: "%s"\n',
31: u'Draai het script opnieuw met de --configure flag.\n',
32: u'"legacy_xmltvids = True" wordt toegevoegd\n',
33: u'Draai het script met "--configure" om het permanent te maken.\n',
34: u'De onbekende afdeling "%s" wordt genegeerd.\n',
35: u'De configuratie regel "%s" wordt genegeerd. Deze bevindt zich buiten een bekende afdeling.\n',
36: u'Fout bij het lezen van de configuratie.\n',
37: u'Fout bij het lezen van een regel van de [Configuration] afdeling in %s:\n ',
38: u'Fout bij het lezen van een regel van de [Channels] afdeling in %s:\n ',
39: u'De zender afdeling [%s] wordt genegeerd. Onbekende zender.\n',
40: u'Fout bij het lezen van een regel van de [%s] afdeling in %s:\n ',
41: u'Fout bij het lezen van het standaarden bestand: %s\n',
43: u'Fout bij het lezen van het instellingenbestand op github.\n',
44: u'Het is onmogelijk de configuratie voort te zetten!\n',
45: u'Een ongeldige start tijd voor %s in de gecombineerde zender: %s\n Het wordt op middernacht gezet!',
46: u'Een ongeldige eind tijd voor %s in de gecombineerde zender: %s\n Het wordt op middernacht gezet!',
47: u'Een ongeldige start tijd voor %s\n Het wordt op middernacht gezet!',
48: u'Een ongeldige eind tijd voor %s\n Het wordt op middernacht gezet!',
62: u'Niet alle zender informatie kon worden binnen gehaald.\n',
63: u'Probeer opnieuw over 15 minuten of zo; of schakel de falende bron uit.\n',
64: u'De Bron %s (%s) is uitgeschakeld',
65: u'Er worden geen detail pagina\'s van %s (%s) gehaald.',
66: u'Zender specifieke instellingen anders dan de bovenstaande (uitsluitend voor de actieve zenders!):',
67: u' de prime_source instelling: %s (%s) in het .json bestand wordt niet gebruikt\n',
68: u' De Bron %s (%s) is uitgeschakeld\n',
69: u' De detail Bron %s (%s) is uitgeschakeld\n',
70: u'Fout bij het openen van het oude configuratie bestand. Er wordt een nieuwe aangemaakt.\n',
71: u'Fout bij het lezen van de oude configuratie\n',
72: u'The bron %s is uitgeschakeld voor deze API versie!\n'
},
u'help':{
1: u' Een verzamelaar van TV programmagegevens vanuit meerdere bronnen,\n',
2: u' die vervolgens die gegevens combineert in één XMLTV compatibele lijst.',
3: u'Toon deze tekst',
5: u'Toon de versie',
6: u'Geeft een korte beschrijving van het programma',
7: u'Geeft een uitgebreide beschrijving van het programma\n' + \
'in het engels',
8: u'xmltv vereiste optie',
9: u'Geeft de gewenste methode om aangeroepen te worden',
10: u'Geeft de beschikbare bronnen',
11: u'Schakel een genummerde bron uit. Zie "--show-sources"\n' + \
'voor een lijst van de beschikbare bronnen.',
12: u'Geeft de beschikbare detail bronnen',
13: u'Geeft de beschikbare logo bronnen',
15: u'Schakel een genummerde bron uit voor detail pagina\'s.\n' + \
'Zie "--show-detail-sources" voor een lijst van de\n' + \
'beschikbare bronnen.',
16: u'Schakel het ophalen van extra gegevens van ttvdb.com uit',
17: u'Zoek op ttvdb.com naar een serie titel en sla hem\n' + \
'eventueel met het ID op in de database.\n' + \
'Plaats aanhalingstekens om de titel! Voeg eventueel\n' + \
'achter de titel een tweeletterige taalcode toe.\n',
18: u'Voeg"%s" toe achter het xmltv id\n',
19: u'Verwijder zoals voor versie 2.2.8 voor bron 0 en 1 het\n' + \
'bronid van het chanid om het xmltvid te krijgen.',
20: u'Gebruik UTC tijd voor de uitvoer',
21: u'Maak een nieuw configuratie bestand aan en\n' + \
'hernoem een bestaand bestand naar *.old.',
22: u'Plaats alle actieve zender in het nieuwe bestand\n' + \
'in een aparte groep boben aan de lijst.\n' + \
'Alleen relevant samen met de configure optie.',
23: u'Naam van het configuratie bestand\n' + \
'<standaard = "%s">',
24: u'Sla de op dit moment gedefinieerde opties op in het\n' + \
'configuratie bestand. Voeg opties toe aan de commando\n' + \
'regel om ze toe te voegen of te wijzigen.',
25: u'Gebruik dit bestand voor de cache functie\n' + \
'<standaard = "%s">',
26: u'Verwijder achterhaalde programmagegevens uit de cache',
27: u'Verwijder alle programmagegevens uit de cache',
28: u'Verwijder alle ttvdb gegevens uit de cache',
29: u'Betand waarnaartoe de uitvoer te sturen.\n' + \
'<standaard naar het scherm>',
30: u'Gebruik voor de uitvoer de Windows codeset (cp1252)\n' + \
'in plaats van utf-8',
31: u'Onderdruk alle log uitvoer naar het scherm.',
32: u'Zend de log uitvoer ook naar het scherm.',
33: u'Haal geen detail pagina\'s van één van de bronnen op.\n',
34: u'<standaard> Haal de beschikbare detail pagina\'s van de\n' + \
'bronnen op',
35: u'De eerste dag waarvan programma gegevens op te halen\n' + \
'<standaard is 0 is vandaag>',
36: u'Het aantal dagen waarvoor programmagegevens op te halen.\n' + \
'<max 14 = standaard>\n' + \
'Elke bron heeft zijn eigen maximum, dat lager kan zijn.\n',
38: u'Het aantal dagen om "traag" (met details) gegevens op\n' + \
'te halen.\n' + \
'Standaard alle dagen',
39: u'<standaard> Voeg url\'s van de zender iconen toe\n' + \
'(mythfilldatabase zal deze dan gebruiken)',
40: u'Voeg geen url\'s van de zender iconen toe',
41: u'Markeer de HD programma\'s,\n' + \
'gebruik dit niet als je alleen maar analoge SD opneemt',
42: u'<standaard> Vertaal de genre\'s van de bronnen naar\n' + \
'MythTV-genre\'s. Zie het %s.set bestand\n' + \
'voor de vertaal tabellen',
43: u'Vertaal de genre\'s van de bronnen niet naar\n' + \
'MythTV-genres.',
44: u'Het maximaal toegelaten aantal karakters voor de\n' + \
'beschrijvingen.De rest wordt weggeknipt.',
45: u'Wat te doen wanneer programma\'s niet goed aansluiten:\n' + \
'"avarage" Neem het gemiddelde van de eindtijd en de\n' + \
' begintijd van het volgende programma.\n' + \
' <standaard>\n' + \
'"stop" Pas de begintijd van het volgende programma\n' + \
' aan aan de eindtijd.\n' + \
'"start" Pas de eindtijd aan aan de begintijd van het \n' + \
' volgende programma.\n' + \
'"none" Doe niets.\n',
46: u'De maximale afwijking tussen eindtijd en begintijd van\n' + \
'het volgende programma dat gecorrigeerd mag worden.\n' + \
'<standaard 10 minuten>',
47: u'Geef de taal voor de systeem en log berichten.\n' + \
'Op dit moment "en" (standaard) of "nl"',
48: u'Gebruik alleen data uit de cache.',
49: u'Verwijder alle programmagegevens van de opgegeven bron\n' + \
'uit de cache',
50: u'Verwijder alle programmagegevens van de opgegeven zender\n' + \
'uit de cache',
100: u' Een testmodule om bronbestanden te testen.\n' + \
' Je kunt de variabelen ook instellen in:%s/.json_struct/tv_grab_test.conf',
101: u'Naam van de grabber en de configuratie on te testen\n' + \
'like <"tv_grab_xx">',
102: u'De dag waarop te testen <defaults to 0 is today>',
103: u'De directory waar de bron bestanden zich bevinden.\n' + \
'<default = "%s">\n' + \
'(Je kan een aparte directory voor het graber bestand\n' + \
'in %s/.json_struct/tv_grab_test.conf instellen)',
104: u'De directory waarin de rapporten te plaatsen\n' + \
'<default = "%s">',
105: u'De te testen bron <default = 1>',
106: u'De uit te voeren test. Geldig zijn:\n' + \
' channels: test de channels of base-channels data_def\n' + \
' base: test de base data_def\n' + \
' detail: test de detail data_def\n' + \
' lineup: test de lineup van al de bronnen\n' + \
'<default = channels>',
107: u'De van de base-page te extraheren chanid',
108: u'De detailid in de URL van de detail-pagina',
109: u'Het voor tv_grab_test_json.py te gebruiken report-level\n' + \
'<default = 511 or -513>',
110: u'Creer of update het configuratie bestand:\n' + \
'"%s.json_struct/tv_grab_test.conf"'
},
u'confighelp':{
0: u'# VERANDER DE ONDERSTAANDE WAARDE NIET!\n',
1: u'# Zie: https://github.com/tvgrabbers/tvgrabpyAPI/wiki/configuration\n',
2: u'# of: https://github.com/tvgrabbers/tvgrabnlpy/wiki/Over_de_configuratie\n',
3: u'# Dit is een lijst met de standaard opties ingesteld met --configure (-C)\n',
4: u'# Velen kun je op de commandregel met opties veranderen.\n',
5: u'# Wees voorzichtig met handmatig bewerken. Ongeldige waarden worden\n',
6: u'# stilzwijgend genegeerd. Voor boolean waarden kun je True/False, On/Off\n',
7: u'# of 0/1 gebruiken. Geen waarde schakeld ze aan, een ongeldige waarde uit.\n',
8: u'# Je kunt altijd je log bestand controleren voor de feitelijk gebruikte\n',
9: u'# waarden. Alleen hier getoonde opties kun je hier instellen.\n',
10: u'',
11: u'# Zet always_use_json op False om door het .json databestand voorgestelde\n',
12: u'# waarden voor zendernaam, zendergroep en prime_source te negeren.\n',
13: u'# Wanneer je hier zelf niets aan veranderd hebt, laat je hem het best\n',
14: u'# op True staan om maximaal van alle updates te kunnen profiteren.\n',
15: u'',
16: u'',
21: u'# De volgende zijn tunning parameters. Normaal gesproken behoef je hier niets\n',
22: u'# aan te veranderen.\n',
23: u'# global_timeout is de maximum tijd in secondes om op een pagina te wachten.\n',
24: u'# max_simultaneous_fetches is het maximum aantal pagina\'s dat tegelijkertijd\n',
25: u'# opgehaald kan worden. Bij meer verzoeken worden deze in de wacht gezet.\n',
26: u'# Met het toenemend aantal bronnen is het mogelijk dat zij allemaal tegelijk\n',
27: u'# hun pagina op proberen te halen. Dit kan tot verstopping van je internet\n',
28: u'# verbinding leiden en dus tot mislukkingen.\n',
29: u'# Wanneer je regelmatig "incomplete read failures" of "get_page timed out"\n',
30: u'# fouten ziet kun je proberen de eerste op te hogen of de tweede te verlagen.\n',
31: u'# Dit zal de totale duur niet belangrijk beinvloeden, want dit wordt voornamelijk\n',
32: u'# bepaald door de bron met de meeste detail pagina\'s en de verplichte wachttijd\n',
33: u'# tussen de pagina\'s om de bronnen niet te overbelasten.\n',
34: u'# Maar mislukte basis pagina\'s worden opnieuw geprobeerd en een mislukte\n',
35: u'# detail pagina kan betekenen, dat deze van een andere bron geprobeerd wordt.\n',
36: u'# Dus veel mislukkingen, met name bij de detail pagina\'s kan de totale duur\n',
37: u'# verlengen.\n',
38: u'',
39: u'',
41: u'# Dit bepaalt wat er naar het log en het scherm gaat.\n',
42: u'# 0 Niets (gebruik quiet mode om alleen uitvoer naar het scherm uit te schakelen)\n',
43: u'# 1 Geef Fouten en waarschuwingen\n',
44: u'# 2 Geef Welke pagina\'s opgehaald worden\n',
45: u'# 4 Statistieken van onder andere het samenvoegen van de bronnen\n',
46: u'# 8 Zend alle detail en ttvdb verzoeken naar het scherm\n',
47: u'# 16 Zend alle detail en ttvdb verzoeken naar het log bestand\n',
48: u'# 32 Geef details van het samenvoegen van de bronnen (zie hieronder)\n',
49: u'# 64 Toon alle titel hernoemingen\n',
50: u'# 128 Toon alle TTVDB mislukkingen\n',
51: u'# 256 DataTreeGrab Warnings\n',
52: u'# 512 Geef elke 5 min de Queue status op het scherm weer\n',
53: u'',
61: u'# Welke samenvoeg resultaten gaan naar het log/scherm (heeft log_level 32 nodig)\n',
62: u'# 0 = Log niets\n',
63: u'# 1 = log niet gekoppelde programma\'s, die toegevoegd worden\n',
64: u'# 2 = log overgebleven, niet toegevoegd programma\'s\n',
65: u'# 4 = Log gekoppelde programma\'s\n',
66: u'# 8 = Log groepslots\n',
67: u'',
71: u'# Zet "mail_log" op True om het log naar het onderstaande mail-adres te sturen.\n',
72: u'# Stel ook je mailserver en poort juist in.\n',
73: u'# SSL/startTLS wordt niet ondersteund, evenmin als een login om te verzenden.\n',
74: u'# Test dit eerst vanaf de console, want het versturen gebeurt na het sluiten van\n',
75: u'# het log en je ziet daarin dus geen fouten!\n',
76: u'',
77: u'',
81: u'# Mogelijke waarden voor ratingstyle (kijkwijzerstijl) zijn:\n',
82: u'# long : Voeg de lange beschrijving en de iconen toe\n',
83: u'# short : Voeg een enkel woord en de iconen toe\n',
84: u'# single: Voeg een enkele regel toe (mythtv gebruikt alleen het eerste item)\n',
85: u'# none : Voeg niets toe\n',
86: u'',
87: u'',
91: u'# Dit zijn de zender definities. Je kan een zender uitschakelen door aan het \n',
92: u'# begin van de regel een "#" te plaatsen. Gescheiden door ";" zie je op elke\n',
93: u'# regel: De naam, de groep, het chanID, de ID\'s voor de verschillende bronnen\n',
94: u'# in de volgorde zoals door de "--show-sources" optie weergegeven (waarbij bron 0\n',
95: u'# niet bestaat, tvgids.nl is van 0 naar 3 verhuisd!!) en de logo bron en naam.\n',
96: u'# Je kunt de naam naar behoefte aanpassen.\n',
97: u'# Een ontbrekend ID betekent dat die bron deze zender niet levert.\n',
98: u'# Het verwijderen van een ID schakelt de zender voor die bron uit, maar zorg dat\n',
99: u'# de ";"s blijven staan! Je kunt echter beter de "disable_source" optie gebruiken.\n',
100: u'# Zet de logo bron op 99 om zelf een volledige URL naar een logo te leveren.\n',
101: u'#\n',
102: u'# Om per zender opties in te stellen, kun je onderaan een sectie zoals: \n',
103: u'# [Channel <channelID>] toevoegen, waarbij <channelID> het derde item is.\n',
104: u'# Zie de WIKI op https://github.com/tvgrabbers/tvgrabnlpy/wiki voor verdere\n',
105: u'# beschrijvingen. Je kunt de volgende opties instellen:\n',
106: u'# Boolean waarden (True, 1, on of geen waarde betekent True. De rest False):\n',
107: u'# fast, compat, legacy_xmltvids, logos, cattrans, mark_hd, add_hd_id,\n',
108: u'# disable_ttvdb, use_split_episodes\n',
109: u'# legacy_xmltvids: is only valid for the Dutch/Flemish grabber\n',
110: u'# add_hd_id: Wanneer deze op True gezet wordt, worden er twee programma\n',
111: u'# lijsten voor de zender gemaakt één gewone en één met "-hd" achter het\n',
112: u'# xmltv ID. en met HD markering. "mark_hd" wordt dan voor deze zender genegeerd.\n',
113: u'# Integer waarden:\n',
114: u'# slowdays, max_overlap, desc_length, prime_source, prefered_description\n',
115: u'# disable_source, disable_detail_source\n',
116: u'# prime_source is de bron waarvan de tijden en titel dominant zijn.\n',
117: u'# Standaard is dit voor RTL zenders 2, voor NPO zenders 4, voor nederlandse\n',
118: u'# regionale zenders 5, voor groep 2 en 9 (Vlaams) 6. Verder de eerst\n',
119: u'# beschikbare bron in de volgorde (2, 4, 10, 12, 7, 3, 5, 1, 9, 6, 8, 11)\n',
120: u'# prefered_description (1-12) is de bron die, wanneer beschikbaar de \n',
121: u'# omschrijving levert. Standaard is dit de langst beschikbare.\n',
122: u'# Met disable_source en disable_detail_source kun je een bron voor deze\n',
123: u'# zender uitschakelen. Voor alles of alleen voor de detail pagina\'s\n',
124: u'# Een niet beschikbare bron uitschakelen heeft geen effect.\n',
125: u'# Met de commando regel opties: "--show-sources" en "--show-detail-sources"\n',
126: u'# kun je een lijst tonen van de beschikbare bronnen en hun ID\n',
127: u'# String waarden:\n',
128: u'# overlap_strategy (met als mogelijke waarden): \n',
129: u'# average, stop, start; iets anders levert de waarde none\n',
130: u'# xmltvid_alias: Standaard wordt het chanid gebruikt als xmltvID.\n',
131: u'# Hiermee kun je een andere tekst waarde instellen. Wees voorzichtig niet een\n',
132: u'# al bestaande waarde te kiezen. Het kan door "--configure"ingesteld worden\n',
133: u'# om chanid veranderingen te ondervangen. Zie verder de WIKI\n',
134: u'\n',
135: u'',
136: u'',
137: u'',
138: u'',
139: u'',
140: u'',
141: u'# Dit is een lijst van titels met een ":" die niet in een titel en\n',
142: u'# een afleverings titel gesplitst moeten worden. Dit zijn met name\n',
143: u'# spin-off series zoals: "NCIS: Los Angeles". Films en programma\'s\n',
144: u'# die al een afleverings titel hebben, zijn al uitgesloten.\n',
145: u'',
146: u'# Dit is een lijst van groepstitels voor de ":", die verwijderd moeten\n',
147: u'# worden. Bijvoorbeeld: "KRO detectives".\n',
148: u'',
149: u'',
150: u'# Dit is een lijst van titels die hernoemd moeten worden.\n',
151: u'# Bijvoorbeeld "navy NCIS" naar "NCIS". Dit onder anderen om\n',
152: u'# verschillende titels bij verschillende bronnen op te vangen.\n',
153: u'',
154: u'# Dit is een lijst van genres waarvoor detail pagina\'s opgehaald moeten\n',
155: u'# worden. Voor programma\'s zonder deze genres worden geen detailpagina\'s\n',
156: u'# opgehaald. Gebruik de genres van voor de toepassing van cattrans.\n',
157: u'# Voeg "none" toe om voor programma\'s zonder genre details op te halen.\n',
158: u'# Voeg "all" toe om, wanneer beschikbaar altijd details op te halen.\n',
159: u'# voor alle andere genres in de lijst worden nu GEEN details opgehaald!\n',
160: u'',
161: u'',
162: u'# Dit zijn de vertaallijsten voor:\n',
163: u'# naar een gemeenschappelijk genre:subgenre. Wanneer cattrans is ingeschakeld\n',
164: u'# dan worden deze vervolgens volgens de lijst verder naar beneden omgezet.\n',
165: u'',
166: u'# De genres van:\n',
167: u'# %s worden als subgenres gezien.\n',
168: u'# Dit zijn lijsten van genres om hieraan toe te voegen. Nieuwe "subgenres"\n',
169: u'# worden automatisch gekoppeld en toegevoegd op basis van algemene regels.\n',
170: u'',
171: u'# Dit is de "Genre:Subgenre" conversie tabel die door cattrans wordt gebruikt.\n',
172: u'# "Genre:Subgenre" wordt automatisch naar kleine letters omgezet\n',
173: u'# en begin en eind spaties worden verwijderd.\n',
174: u'# De lijst wordt gesorteerd met de genres zonder subgenre aan het begin.\n',
175: u'# Nieuwe waarden worden continu toegevoegd\n',
176: u'',
177: u'',
178: u'# achter het "=" teken geef je de te gebruiken categorie\n',
179: u'# Als een categorie leeg is dan wordt de hoofd categorie of een bestaande\n',
180: u'# standaard gebruikt\n',
181: u'# Als een hoofd categorie leeg is, dan wordt een standaard waarde aangeleverd.\n',
182: u'# en gebruikt. Wanneer er geen standaard bekent is, dan wordt "Unknown"\n',
183: u'# gebruikt. Het is verstandig om regelmatig op nieuwe hoofd categorieën\n',
184: u'# te controleren, zodat deze niet naar "Unknown" vertaald worden.\n',
185: u'',
186: u'',
191: u'# ttvdb_lookup_level is een integer waarde tussen 0 en 3\n',
192: u'# Bij een hoger waarde zal voor meer programma\'s een ttvdbid opgezocht worden\n',
193: u'# 0 is gelijk aan "disable_ttvdb" = True, maar het laat je wel nog voor\n',
194: u'# aparte zenders een hogere waarde instellen, want de module wordt wel geladen.\n',
195: u'# 1 doet alleen een look-up bij gedeeltelijk ontbrekende seizoen/episode info.\n',
196: u'# 2 bij alle series die tenminste een episode titel hebben.\n',
197: u'# 3 bij alle series.\n',
198: u'# De pre_merge optie is alleen relevant voor bronnen die data van een detailpagina\n',
199: u'# nodig hebben om de juiste start en stop tijden vast te stellen (npo.nl)\n',
200: u'# EN die als prime_source ingesteld zijn.\n',
201: u'# Omdat dit de duur kan verlengen kun je het hier of voor één kanaal uitschakelen.\n',
211: u'# Dit zijn lijsten van de genres die als een serie, film en sport genre gezien worden\n',
212: u'# Het wordt bijv. gebruikt om programma\'s aan te merken om ze op thetvdb op te zoeken\n',
213: u'# en voor naam manipulaties om een episode titel te vinden.\n',
214: u'# De door de beheerder toegevoegde standaard kun je niet verwijderen.\n',
215: u''
},
u'mergeinfo':{
1: u'%s is samengevoegd met %s\n',
2: u'Omdat ze allebij actief zijn, hebben we geen Alias ingesteld.\n',
3: u'Wanneer je het oude chanid %s als xmltvid\n',
4: u'wilt gebruiken, moet je:\n',
5: u'toevoegen aan de zender configuratie voor %s\n',
6: u'Omdat het oude chanid actief was, hebben we een Alias ingesteld\n',
7: u'voor de zender configuratie van %s\n',
8: u'Omdat %s al een xmltvid_alias heeft\n',
9: u'hebben we dit niet aangepast.\n',
10: u'Wanneer je het oude chanid %s als xmltvid\n',
11: u'wilt gebruiken moet je:\n',
12: u'veranderen in:',
13: u'in de zender configuratie van %s\n',
14: u'We konden niet controleren op zelf aangepaste opties voor het oude chanid: %s\n',
15: u'Dus controleer de nieuwe instellingen van het nieuwe chanid: %s\n'
},
u'stats':{
72: u'Uitvoering gereed.\n',
73: u'Verzamel statistieken van %s programma\'s voor %s zenders:\n',
74: u' Start tijd: %s\n',
75: u' Eind tijd: %s\n',
76: u' Duur: %s\n',
77: u'%6.0f pagina(\'s) opgehaald, waarvan er %s faalden en %s geen data opleverden\n',
78: u'%6.0f cache vonst(en)\n',
79: u'%6.0f succesvolle ttvdb.com verwijzingen\n',
80: u'%6.0f misluktte ttvdb.com verwijzingen\n',
81: u' Tijd/pagina: %s seconds\n',
82: u'%6.0f pagina(\'s) opgehaald van theTVDB.com\n',
83: u'%6.0f mislukking(en) op theTVDB.com\n',
84: u'%6.0f basis pagina(\'s) opgehaald van %s, waarvan %s geen data opleverden\n',
85: u'%6.0f detail pagina(\'s) opgehaald van %s, waarvan %s geen data opleverden\n',
86: u'%6.0f mislukking(en) op %s\n'
},
u'other':{
0: u'Verzamel API die meerdere bronnen samenvoegt.',
1: u'De beschikbare bronnen zijn:',
2: u'De beschikbare detail bronnen zijn:',
3: u'De beschikbare logo bronnen zijn:',
4: u' 99: Je eigen volledige logo url',
5: u'De begintijd van deze verzamelronde is %s\n',
6: u'Versie',
7: u'Taal',
            8: u'Er is een nieuwere stabiele API release beschikbaar op github!\n',
9: u'Ga naar: %s\n',
10: u'Er is een nieuwere stabiele frontend release beschikbaar!\n',
11: u'De zender/bron json data is nieuwer!\n',
12: u'Draai met "--configure" om dit te implementeren\n'
}},
u'IO':{
u'error':{
1: u'Het bestand: "%s" is niet gevonden of kon niet worden geopend.\n',
2: u'%s is niet met %s gecodeerd.\n',
3: u'%s heeft een ongeldige codering %s.\n',
10: u'Wanneer je hulp wilt, voeg dan ajb je configuratie en log bestanden bij!\n',
11: u'Een onverwachte fout is opgetreden in de %s thread:\n',
12: u'Een onverwachte fout is opgetreden:\n',
13: u'Een onbekend log-bericht: %s van type %s\n',
14: u'bij het verzamelen van de basis-pagina\'s\n',
15: u'De huidige detail url is: %s\n',
16: u'bij het ophalen van de detail pagina\'s\n',
20: u'Er is geen cache bestand opgegeven. De cache functionaliteit wordt uitgeschakeld!\n',
21: u'De cache folder is niet toegankelijk. De cache functionaliteit wordt uitgeschakeld!\n',
22: u'Een fout bij het laden van de database: %s.db (mogelijke corruptie)\n',
23: u'We proberen de backup te laden',
24: u'Het laden van de database: %s.db is mislukt\n',
25: u'De cache functionaliteit wordt uitgeschakeld',
26: u'Database Fout\n'
},
u'other':{
1: u'De Database wordt gecontroleerd.\n',
2: u'We wachten op alle overgebleven programma-threads om af te sluiten.\n'}},
u'fetch':{
u'error':{
1: u'get_page duurt te lang (>%s s): %s\n',
2: u'Een onverwachte fout "%s:%s" is opgetreden bij het ophalen van: %s\n',
3: u'Kan de url %s niet openen.\n',
4: u'Kan de pagina niet lezen. %s: code=%s\n',
5: u'De url: %s gaf een JSON fout: %s\n',
11: u'Fout bij het verwerken van de %s-functie %s voor bron %s\n',
12: u'De geleverde data was: %s\n',
13: u'Er is een fout "%s" opgetreden bij het inlezen van een DataTree\n',
14: u'Er is een fout: "%s"\n opgetreden bij het inlezen van een %s DataTree voor %s\n',
19: u'De verwerking van de IDs: %s op TheTVDB duurt te lang.\n',
20: u'Zender %s lijkt op %s verloren ttvdb verzoeken te wachten.\n',
21: u'Zender %s lijkt op %s verloren detail verzoeken te wachten.\n',
22: u'We annuleren en stellen het als klaar.\n',
23: u'Fout bij het verwerken van de detail-pagina: %s\n',
24: u'Fout bij het verwerken van de detail2-pagina: %s\n',
25: u'Fout bij het ophalen van de %s URL voor bron: %s uit de json data_def\n',
26: u'Fout bij het lezen van de %s-pagina: %s\n',
27: u'De juiste datum van de: %s pagina kan niet worden vastgesteld.\n',
28: u'Sla zender %s op %s!, dag=%d over. Verkeerde datum!\n',
29: u'Een onverwachte fout bij het ophalen van de %s-pagina van: %s\n',
30: u'Het is onmogelijk om zender informatie van %s te verkrijgen\n',
31: u'Een fatale fout bij het verwerken van de basis-pagina\'s van %s\n',
32: u'We stellen dat ze allemaal binnen zijn en laten de andere bronnen de taak voltooien.\n',
33: u'Kan de programma titel van "%s" op zender: %s, van bron: %s niet bepalen.\n',
34: u'Kan de programma tijd van "%s" op zender: %s, van bron: %s niet bepalen.\n',
35: u'De pagina %s leverde geen data op\n',
36: u'Verwijder "%s" van "%s"\n',
37: u'De titel "%s" wordt gesplitst\n',
38: u'Hernoem "%s" naar "%s"\n',
39: u'%s gaf een lege pagina voor zender: %s.\n We gaan er van uit dat de rest ook leeg is!\n',
51: u'Geen data van %s voor zender: %s\n',
52: u'De detail bron: %s is gestopt.\n',
53: u'Dus we stoppen met wachten voor de onderhanden detailverzoeken voor %s\n',
54: u' en %s',
55: u'%s is gestopt voordat het de %s zender data heeft vrijgegeven. We stoppen met wachten!\n'
},
u'report':{
1: u'Nu wordt %s(xmltvid=%s%s) van %s opgehaald\n',
2: u'Nu word(t/en) %s zender(s) van %s opgehaald\n',
3: u'Nu wordt de %s zendergroep van %s opgehaald\n',
4: u' (zender %s van %s) voor dag %s van %s.\n',
5: u' (zender %s van %s) voor %s dagen.\n',
6: u' (zender %s van %s) voor periode %s van %s).\n',
7: u' (zender %s van %s) voor %s dagen, pagina %s.\n',
8: u' voor dag %s van %s.\n',
9: u' voor %s dagen.\n',
10: u' voor periode %s van %s.',
11: u' voor %s dagen, pagina %s.\n',
12: u'\nDag %s voor %s van %s wordt uit de cache gehaald.\n',
15: u'Sla zender %s op %s, dag=%d over. Geen data\n',
16: u'Sla zender %s op %s over!. Geen data',
17: u'Sla zender %s op %s over!, periode=%d. Geen data\n',
18: u'Sla zender %s op %s over!, pagina=%d. Geen data\n',
19: u'Sla dag %d op %s over. Geen data\n',
20: u'Sla %s over. Geen data\n',
21: u'Sla periode %d op %s over. Geen data\n',
22: u'Sla pagina %d op %s over. Geen data\n',
23: u'Sla zendergroep %s op %s over!, dag=%d. Geen data\n',
24: u'Sla zendergroep %s op %s over!. Geen data',
25: u'Sla zendergroep %s op %s over!, periode=%d. Geen data\n',
26: u'Sla zendergroep %s op %s over!, pagina=%d. Geen data\n',
31: u'[ophalen mislukt] %s:(%3.0f%%) %s\n',
32: u'[%s verzoek] %s:(%3.0f%%) %s\n',
33: u' [cached] %s:(%3.0f%%) %s\n',
34: u'[geen verzoek] %s:(%3.0f%%) %s\n',
35: u'[%s premerge] %s:(%3.0f%%) %s\n',
41: u'Nu wordt de cache gecontrolleerd op %s programmadetails voor %s(xmltvid=%s%s)\n',
42: u'Nu worden de details voor %s programma\'s op %s(xmltvid=%s%s) opgehaald\n',
43: u' (zender %s van %s) voor %s dagen.\n',
},
u'stats':{
1: u'Detail statistieken voor %s (zender %s van %s)\n',
2: u'%6.0f cache vonst(en)\n',
3: u'%6.0f zonder details in de cache\n',
4: u'%6.0f succesvolle ttvdb.com verwijzingen\n',
5: u'%6.0f misluktte ttvdb.com verwijzingen\n',
6: u'%6.0f detail pagina(\'s) opgehaald van %s.\n',
7: u'%6.0f mislukking(en)\n',
8: u'%6.0f zonder detail info\n',
9: u'%6.0f resterend in de %s queue om te verwerken\n',
10: u'%6.0f uitgesloten door het genre filter\n'
},
u'other':{
1: u' %s.json wordt gedownload ...\n',
u'': u''}},
u'merge':{
u'error':{
1: u'Het gat/overlap van %s min. om %s op zender %s wordt gecorrigeerd.\n',
2: u'Het programma %s op %s\n met gelijke start en stop tijd wordt verwijderd.\n'
},
u'stats':{
1: u'Nu worden %s programma\'s van %s aan %s toegevoegd\n',
2: u'Nu worden %s programma\'s van %s met %s programma\'s van %s samengevoegd\n',
3: u' (zender %s van %s)\n',
5: u'Toevoeg',
6: u'Samenvoeg',
7: u' bron',
8: u'zender',
9: u'%s statistieken voor %s (zender %s van %s)\n van %s %s\n',
10: u'%7.0f programma\'s op %s voor: %s - %s\n (%2.0f groepslots),\n',
11: u'%7.0f programma\'s van %s voor: %s - %s\n (%2.0f groepslots)\n',
12: u'%7.0f programma\'s gekoppeld op tijd en naam\n',
13: u'%7.0f programma\'s nieuw toegevoegd\n',
14: u'%7.0f programma\'s toegevoegd aan een groepslots\n',
15: u'%7.0f programma\'s generiek gekoppeld op naam om een genre te verkrijgen\n',
16: u'%7.0f programma\'s ongekoppeld overgebleven in %s\n',
17: u'Nu %4.0f programma\'s waarvan %2.0f groepslots\n',
18: u'en %4.0f titels zonder geassocieerd genre.\n',
19: u'Detail',
31: u'Toegevoegd van %s:%s: %s Genre: %s.\n',
32: u'Overgebleven in %s:%s: %s Genre: %s.\n',
33: u'Gekoppeld van %s:%s: %s Genre: %s.\n',
34: u' van %s:%s: %s Genre: %s.\n',
35: u'Ongekoppeld: %s: %s Genre: %s.\n',
36: u' op tijd en titel met:%s: %s Genre: %s.\n',
37: u'Toegevoegd aan groepslot: %s: %s Genre: %s.\n',
38: u'',
39: u'',
}},
u'ttvdb':{
u'error':{
1: u'Sorry, thetvdb.com is uitgeschakeld!\n',
2: u'Svp geef een serie titel!\n',
3: u'Ongeldige taalcode: "%s" gegeven. "en" wordt gebruikt\n',
11: u'Fout bij het ophalen van een ID van theTVdb.com\n',
12: u'Fout bij het ophalen van de afleveringen van theTVDB.com\n',
13: u' Geen ttvdb id voor "%s" op zender %s\n',
14: u'ttvdb verwijzing voor "%s: %s"\n',
15: u'ttvdb mislukking voor "%s: %s" op zender %s\n',
16: u'Er is geen episode data voor %s gevonden op theTVDB.com\n'
},
u'frontend':{
0: u'',
1: u'De serie "%s" is al opgeslagen met ttvdbID: %s -> %s',
2: u' voor de talen: (%s)\n',
3: u'De serie "%s" is nog niet bekend!\n',
4: u'Er is geen verwijzing voor %s gevonden op theTVDB.com',
5: u'theTVDB Zoek resultaten:',
6: u'Geef een keuze (eerste nummer, q om te annuleren):',
7: u'Verwijder het oude record',
8: u'"%s" met de aliassen "%s" en "%s" wordt onder ttvdbID: %s aan de database toegevoegd!',
9: u'"%s" met alias "%s" wordt onder ttvdbID: %s aan de database toegevoegd!',
10: u'"%s" wordt onder ttvdbID: %s aan de database toegevoegd!',
11: u'\nHet "%s" bestand voor "%s" id = %s met %s episodes wordt opgehaald!\n',
12: u' Episode %4.0f van %s wordt verwerkt\n',
13: u' Wil je dit vervangen met %s ("%s")? <y/n>\n',
14: u'Het opgegeven ttvdbid %s is onbekend op theTVDB.com!',
15: u'Druk ajb op y of op n.',
16: u' Wil je het toevoegen als %s ("%s")? <y/n>\n'
}}}
return texts
def create_pickle(texts):
fle_name = u'%s/%s.%s' % (os.path.abspath(os.curdir), name, lang)
if os.path.isfile(fle_name):
print(u'The language file %s already exists.\nDo you want to overwrite it [Y|N]?' % fle_name)
while True:
x = sys.stdin.read(1)
if x in ('n', 'N'):
print(u'Exiting')
sys.exit(0)
elif x in ('Y', 'y'):
break
os.remove(fle_name)
print(u'Writing %s language file' % language)
    fle = open(fle_name, 'wb')
text_dict = {}
text_dict['lang'] = lang
text_dict['language'] = language
text_dict['version'] = version
text_dict['texts'] = texts
    pickle.dump(text_dict, fle)
    fle.close()
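# A minimal read-back sketch (an assumption for illustration, not part of the
# grabber API itself): the generated pickle can be checked from a Python 2
# shell, e.g.
#   fle = open('tv_grab_text.nl', 'rb')
#   data = pickle.load(fle)
#   fle.close()
#   print(data['language'])  # -> Nederlands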
def main():
texts = load_texts()
create_pickle(texts)
# allow this to be a module
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 815,895,586,813,689,200 | 66.075153 | 131 | 0.53694 | false |
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/functions/combinatorial/numbers.py | 1 | 40953 | """
This module implements some special functions that commonly appear in
combinatorial contexts (e.g. in power series); in particular,
sequences of rational numbers such as Bernoulli and Fibonacci numbers.
Factorials, binomial coefficients and related functions are located in
the separate 'factorials' module.
"""
from sympy.core.function import Function, expand_mul
from sympy.core import S, Symbol, Rational, oo, Integer, C, Add, Dummy
from sympy.core.compatibility import as_int, SYMPY_INTS
from sympy.core.cache import cacheit
from sympy.functions.combinatorial.factorials import factorial
from sympy.mpmath import bernfrac
from sympy.mpmath.libmp import ifib as _ifib
def _product(a, b):
p = 1
for k in xrange(a, b + 1):
p *= k
return p
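# For example, _product(3, 6) evaluates to 3*4*5*6 == 360 (an illustrative
# value, not taken from the original source).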
from sympy.utilities.memoization import recurrence_memo
# Dummy symbol used for computing polynomial sequences
_sym = Symbol('x')
_symbols = Function('x')
#----------------------------------------------------------------------------#
# #
# Fibonacci numbers #
# #
#----------------------------------------------------------------------------#
class fibonacci(Function):
"""
Fibonacci numbers / Fibonacci polynomials
The Fibonacci numbers are the integer sequence defined by the
initial terms F_0 = 0, F_1 = 1 and the two-term recurrence
relation F_n = F_{n-1} + F_{n-2}.
The Fibonacci polynomials are defined by F_1(x) = 1,
F_2(x) = x, and F_n(x) = x*F_{n-1}(x) + F_{n-2}(x) for n > 2.
For all positive integers n, F_n(1) = F_n.
* fibonacci(n) gives the nth Fibonacci number, F_n
* fibonacci(n, x) gives the nth Fibonacci polynomial in x, F_n(x)
Examples
========
>>> from sympy import fibonacci, Symbol
>>> [fibonacci(x) for x in range(11)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
>>> fibonacci(5, Symbol('t'))
t**4 + 3*t**2 + 1
References
==========
.. [1] http://en.wikipedia.org/wiki/Fibonacci_number
.. [2] http://mathworld.wolfram.com/FibonacciNumber.html
See Also
========
bell, bernoulli, catalan, euler, harmonic, lucas
"""
@staticmethod
def _fib(n):
return _ifib(n)
@staticmethod
@recurrence_memo([None, S.One, _sym])
def _fibpoly(n, prev):
return (prev[-2] + _sym*prev[-1]).expand()
@classmethod
def eval(cls, n, sym=None):
if n.is_Integer:
n = int(n)
if n < 0:
return S.NegativeOne**(n + 1) * fibonacci(-n)
if sym is None:
return Integer(cls._fib(n))
else:
if n < 1:
raise ValueError("Fibonacci polynomials are defined "
"only for positive integer indices.")
return cls._fibpoly(n).subs(_sym, sym)
class lucas(Function):
"""
Lucas numbers
Lucas numbers satisfy a recurrence relation similar to that of
the Fibonacci sequence, in which each term is the sum of the
preceding two. They are generated by choosing the initial
values L_0 = 2 and L_1 = 1.
* lucas(n) gives the nth Lucas number
Examples
========
>>> from sympy import lucas
>>> [lucas(x) for x in range(11)]
[2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123]
References
==========
.. [1] http://en.wikipedia.org/wiki/Lucas_number
.. [2] http://mathworld.wolfram.com/LucasNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic
"""
@classmethod
def eval(cls, n):
if n.is_Integer:
return fibonacci(n + 1) + fibonacci(n - 1)
#----------------------------------------------------------------------------#
# #
# Bernoulli numbers #
# #
#----------------------------------------------------------------------------#
class bernoulli(Function):
r"""
Bernoulli numbers / Bernoulli polynomials
The Bernoulli numbers are a sequence of rational numbers
defined by B_0 = 1 and the recursive relation (n > 0)::
n
___
\ / n + 1 \
0 = ) | | * B .
/___ \ k / k
k = 0
They are also commonly defined by their exponential generating
function, which is x/(exp(x) - 1). For odd indices > 1, the
Bernoulli numbers are zero.
The Bernoulli polynomials satisfy the analogous formula::
n
___
\ / n \ n-k
B (x) = ) | | * B * x .
n /___ \ k / k
k = 0
Bernoulli numbers and Bernoulli polynomials are related as
B_n(0) = B_n.
We compute Bernoulli numbers using Ramanujan's formula::
/ n + 3 \
B = (A(n) - S(n)) / | |
n \ n /
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and::
[n/6]
___
\ / n + 3 \
S(n) = ) | | * B
/___ \ n - 6*k / n-6*k
k = 1
This formula is similar to the sum given in the definition, but
cuts 2/3 of the terms. For Bernoulli polynomials, we use the
formula in the definition.
* bernoulli(n) gives the nth Bernoulli number, B_n
* bernoulli(n, x) gives the nth Bernoulli polynomial in x, B_n(x)
Examples
========
>>> from sympy import bernoulli
>>> [bernoulli(n) for n in range(11)]
[1, -1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66]
>>> bernoulli(1000001)
0
References
==========
.. [1] http://en.wikipedia.org/wiki/Bernoulli_number
.. [2] http://en.wikipedia.org/wiki/Bernoulli_polynomial
.. [3] http://mathworld.wolfram.com/BernoulliNumber.html
.. [4] http://mathworld.wolfram.com/BernoulliPolynomial.html
See Also
========
bell, catalan, euler, fibonacci, harmonic, lucas
"""
# Calculates B_n for positive even n
@staticmethod
def _calc_bernoulli(n):
s = 0
a = int(C.binomial(n + 3, n - 6))
for j in xrange(1, n//6 + 1):
s += a * bernoulli(n - 6*j)
# Avoid computing each binomial coefficient from scratch
a *= _product(n - 6 - 6*j + 1, n - 6*j)
a //= _product(6*j + 4, 6*j + 9)
if n % 6 == 4:
s = -Rational(n + 3, 6) - s
else:
s = Rational(n + 3, 3) - s
return s / C.binomial(n + 3, n)
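    # Illustrative note (the value is a well-known fact, not from the source):
    # for positive even n this routine reproduces the usual Bernoulli numbers,
    # e.g. _calc_bernoulli(10) == Rational(5, 66).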
# We implement a specialized memoization scheme to handle each
# case modulo 6 separately
_cache = {0: S.One, 2: Rational(1, 6), 4: Rational(-1, 30)}
_highest = {0: 0, 2: 2, 4: 4}
@classmethod
def eval(cls, n, sym=None):
if n.is_Number:
if n.is_Integer and n.is_nonnegative:
if n is S.Zero:
return S.One
elif n is S.One:
if sym is None:
return -S.Half
else:
return sym - S.Half
# Bernoulli numbers
elif sym is None:
if n.is_odd:
return S.Zero
n = int(n)
# Use mpmath for enormous Bernoulli numbers
if n > 500:
p, q = bernfrac(n)
return Rational(int(p), int(q))
case = n % 6
highest_cached = cls._highest[case]
if n <= highest_cached:
return cls._cache[n]
# To avoid excessive recursion when, say, bernoulli(1000) is
# requested, calculate and cache the entire sequence ... B_988,
# B_994, B_1000 in increasing order
for i in xrange(highest_cached + 6, n + 6, 6):
b = cls._calc_bernoulli(i)
cls._cache[i] = b
cls._highest[case] = i
return b
# Bernoulli polynomials
else:
n, result = int(n), []
for k in xrange(n + 1):
result.append(C.binomial(n, k)*cls(k)*sym**(n - k))
return Add(*result)
else:
raise ValueError("Bernoulli numbers are defined only"
" for nonnegative integer indices.")
#----------------------------------------------------------------------------#
# #
# Bell numbers #
# #
#----------------------------------------------------------------------------#
class bell(Function):
r"""
Bell numbers / Bell polynomials
The Bell numbers satisfy `B_0 = 1` and
.. math:: B_n = \sum_{k=0}^{n-1} \binom{n-1}{k} B_k.
They are also given by:
.. math:: B_n = \frac{1}{e} \sum_{k=0}^{\infty} \frac{k^n}{k!}.
The Bell polynomials are given by `B_0(x) = 1` and
.. math:: B_n(x) = x \sum_{k=1}^{n-1} \binom{n-1}{k-1} B_{k-1}(x).
    The second kind of Bell polynomials (sometimes called "partial" Bell
polynomials or incomplete Bell polynomials) are defined as
.. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
            \sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* bell(n) gives the `n^{th}` Bell number, `B_n`.
* bell(n, x) gives the `n^{th}` Bell polynomial, `B_n(x)`.
* bell(n, k, (x1, x2, ...)) gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
Notes
=====
Not to be confused with Bernoulli numbers and Bernoulli polynomials,
which use the same notation.
Examples
========
>>> from sympy import bell, Symbol, symbols
>>> [bell(n) for n in range(11)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]
>>> bell(30)
846749014511809332450147
>>> bell(4, Symbol('t'))
t**4 + 6*t**3 + 7*t**2 + t
>>> bell(6, 2, symbols('x:6')[1:])
6*x1*x5 + 15*x2*x4 + 10*x3**2
References
==========
.. [1] http://en.wikipedia.org/wiki/Bell_number
.. [2] http://mathworld.wolfram.com/BellNumber.html
.. [3] http://mathworld.wolfram.com/BellPolynomial.html
See Also
========
bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@staticmethod
@recurrence_memo([1, 1])
def _bell(n, prev):
s = 1
a = 1
for k in xrange(1, n):
a = a * (n - k) // k
s += a * prev[k]
return s
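    # Sketch of expected values (the standard Bell numbers, stated here for
    # illustration rather than taken from the source):
    #   _bell(0) == 1, _bell(1) == 1, _bell(5) == 52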
@staticmethod
@recurrence_memo([S.One, _sym])
def _bell_poly(n, prev):
s = 1
a = 1
for k in xrange(2, n + 1):
a = a * (n - k + 1) // (k - 1)
s += a * prev[k - 1]
return expand_mul(_sym * s)
@staticmethod
def _bell_incomplete_poly(n, k, symbols):
r"""
The second kind of Bell polynomials (incomplete Bell polynomials).
Calculated by recurrence formula:
.. math:: B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) =
\sum_{m=1}^{n-k+1}
                x_m \binom{n-1}{m-1} B_{n-m,k-1}(x_1, x_2, \dotsc, x_{n-m-k+2})
where
B_{0,0} = 1;
B_{n,0} = 0; for n>=1
B_{0,k} = 0; for k>=1
"""
if (n == 0) and (k == 0):
return S.One
elif (n == 0) or (k == 0):
return S.Zero
s = S.Zero
a = S.One
for m in xrange(1, n - k + 2):
s += a * bell._bell_incomplete_poly(
n - m, k - 1, symbols) * symbols[m - 1]
a = a * (n - m) / m
return expand_mul(s)
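    # Illustrative sketch (assumed symbols x1, x2): the incomplete polynomial
    # B_{3,2}(x1, x2) computed by this recurrence is 3*x1*x2, which is what
    # bell(3, 2, (x1, x2)) returns via eval() below.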
@classmethod
def eval(cls, n, k_sym=None, symbols=None):
if n.is_Integer and n.is_nonnegative:
if k_sym is None:
return Integer(cls._bell(int(n)))
elif symbols is None:
return cls._bell_poly(int(n)).subs(_sym, k_sym)
else:
r = cls._bell_incomplete_poly(int(n), int(k_sym), symbols)
return r
#----------------------------------------------------------------------------#
# #
# Harmonic numbers #
# #
#----------------------------------------------------------------------------#
class harmonic(Function):
r"""
Harmonic numbers
The nth harmonic number is given by `\operatorname{H}_{n} =
1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}`.
More generally:
.. math:: \operatorname{H}_{n,m} = \sum_{k=1}^{n} \frac{1}{k^m}
As `n \rightarrow \infty`, `\operatorname{H}_{n,m} \rightarrow \zeta(m)`,
the Riemann zeta function.
* ``harmonic(n)`` gives the nth harmonic number, `\operatorname{H}_n`
* ``harmonic(n, m)`` gives the nth generalized harmonic number
of order `m`, `\operatorname{H}_{n,m}`, where
``harmonic(n) == harmonic(n, 1)``
Examples
========
>>> from sympy import harmonic, oo
>>> [harmonic(n) for n in range(6)]
[0, 1, 3/2, 11/6, 25/12, 137/60]
>>> [harmonic(n, 2) for n in range(6)]
[0, 1, 5/4, 49/36, 205/144, 5269/3600]
>>> harmonic(oo, 2)
pi**2/6
>>> from sympy import Symbol, Sum
>>> n = Symbol("n")
>>> harmonic(n).rewrite(Sum)
Sum(1/_k, (_k, 1, n))
We can rewrite harmonic numbers in terms of polygamma functions:
>>> from sympy import digamma, polygamma
>>> m = Symbol("m")
>>> harmonic(n).rewrite(digamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n).rewrite(polygamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n,3).rewrite(polygamma)
polygamma(2, n + 1)/2 - polygamma(2, 1)/2
>>> harmonic(n,m).rewrite(polygamma)
(-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)
Integer offsets in the argument can be pulled out:
>>> from sympy import expand_func
>>> expand_func(harmonic(n+4))
harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
>>> expand_func(harmonic(n-4))
harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
Some limits can be computed as well:
>>> from sympy import limit, oo
>>> limit(harmonic(n), n, oo)
oo
>>> limit(harmonic(n, 2), n, oo)
pi**2/6
>>> limit(harmonic(n, 3), n, oo)
-polygamma(2, 1)/2
>>> limit(harmonic(m, n), m, oo)
zeta(n)
References
==========
.. [1] http://en.wikipedia.org/wiki/Harmonic_number
.. [2] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber/
.. [3] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber2/
See Also
========
bell, bernoulli, catalan, euler, fibonacci, lucas
"""
# Generate one memoized Harmonic number-generating function for each
# order and store it in a dictionary
_functions = {}
nargs = (1, 2)
@classmethod
def eval(cls, n, m=None):
if m is None:
m = S.One
if n == oo:
return C.zeta(m)
if n.is_Integer and n.is_nonnegative and m.is_Integer:
if n == 0:
return S.Zero
            if m not in cls._functions:
@recurrence_memo([0])
def f(n, prev):
return prev[-1] + S.One / n**m
cls._functions[m] = f
return cls._functions[m](int(n))
def _eval_rewrite_as_polygamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return S.NegativeOne**m/factorial(m - 1) * (polygamma(m - 1, 1) - polygamma(m - 1, n + 1))
def _eval_rewrite_as_digamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_trigamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_Sum(self, n, m=None):
k = C.Dummy("k", integer=True)
if m is None:
m = S.One
return C.Sum(k**(-m), (k, 1, n))
def _eval_expand_func(self, **hints):
n = self.args[0]
m = self.args[1] if len(self.args) == 2 else 1
if m == S.One:
if n.is_Add:
off = n.args[0]
nnew = n - off
if off.is_Integer and off.is_positive:
result = [S.One/(nnew + i) for i in xrange(off, 0, -1)] + [harmonic(nnew)]
return Add(*result)
elif off.is_Integer and off.is_negative:
result = [-S.One/(nnew + i) for i in xrange(0, off, -1)] + [harmonic(nnew)]
return Add(*result)
return self
def _eval_rewrite_as_tractable(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma).rewrite("tractable", deep=True)
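# Editor's addition (not part of the original module): a hedged sketch of the
# harmonic-number evaluations documented above; the values mirror the class
# docstring.  ``_example_harmonic_usage`` is a hypothetical helper name and is
# never invoked automatically.
def _example_harmonic_usage():
    from sympy import Rational, oo, pi
    assert harmonic(4) == Rational(25, 12)     # 1 + 1/2 + 1/3 + 1/4
    assert harmonic(3, 2) == Rational(49, 36)  # generalized order m = 2
    assert harmonic(oo, 2) == pi**2/6          # limiting value is zeta(2)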
#----------------------------------------------------------------------------#
# #
# Euler numbers #
# #
#----------------------------------------------------------------------------#
class euler(Function):
r"""
Euler numbers
    The Euler numbers are given by::
2*n+1 k
___ ___ j 2*n+1
\ \ / k \ (-1) * (k-2*j)
E = I ) ) | | --------------------
2n /___ /___ \ j / k k
k = 1 j = 0 2 * I * k
E = 0
2n+1
* euler(n) gives the n-th Euler number, E_n
Examples
========
>>> from sympy import Symbol, euler
>>> [euler(n) for n in range(10)]
[1, 0, -1, 0, 5, 0, -61, 0, 1385, 0]
>>> n = Symbol("n")
>>> euler(n+2*n)
euler(3*n)
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler_numbers
.. [2] http://mathworld.wolfram.com/EulerNumber.html
.. [3] http://en.wikipedia.org/wiki/Alternating_permutation
.. [4] http://mathworld.wolfram.com/AlternatingPermutation.html
See Also
========
bell, bernoulli, catalan, fibonacci, harmonic, lucas
"""
nargs = 1
@classmethod
def eval(cls, m, evaluate=True):
if not evaluate:
return
if m.is_odd:
return S.Zero
if m.is_Integer and m.is_nonnegative:
from sympy.mpmath import mp
m = m._to_mpmath(mp.prec)
res = mp.eulernum(m, exact=True)
return Integer(res)
def _eval_rewrite_as_Sum(self, arg):
if arg.is_even:
k = C.Dummy("k", integer=True)
j = C.Dummy("j", integer=True)
n = self.args[0] / 2
Em = (S.ImaginaryUnit * C.Sum( C.Sum( C.binomial(k, j) * ((-1)**j * (k - 2*j)**(2*n + 1)) /
(2**k*S.ImaginaryUnit**k * k), (j, 0, k)), (k, 1, 2*n + 1)))
return Em
def _eval_evalf(self, prec):
m = self.args[0]
if m.is_Integer and m.is_nonnegative:
from sympy.mpmath import mp
from sympy import Expr
m = m._to_mpmath(prec)
oprec = mp.prec
mp.prec = prec
res = mp.eulernum(m)
mp.prec = oprec
return Expr._from_mpmath(res, prec)
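# Editor's addition (not in the original module): a hedged sketch showing how
# ``euler.eval`` above handles numeric values and symbolic parity; the helper
# name is illustrative and the function is never called on import.
def _example_euler_usage():
    from sympy import Symbol
    n = Symbol('n', integer=True)
    assert euler(6) == -61       # even-index Euler numbers are computed exactly
    assert euler(7) == 0         # odd-index Euler numbers vanish
    assert euler(2*n + 1) == 0   # parity is also detected symbolically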
#----------------------------------------------------------------------------#
# #
# Catalan numbers #
# #
#----------------------------------------------------------------------------#
class catalan(Function):
r"""
Catalan numbers
    The n-th Catalan number is given by::
1 / 2*n \
C = ----- | |
n n + 1 \ n /
* catalan(n) gives the n-th Catalan number, C_n
Examples
========
>>> from sympy import (Symbol, binomial, gamma, hyper, polygamma,
... catalan, diff, combsimp, Rational, I)
>>> [ catalan(i) for i in range(1,10) ]
[1, 2, 5, 14, 42, 132, 429, 1430, 4862]
>>> n = Symbol("n", integer=True)
>>> catalan(n)
catalan(n)
    Catalan numbers can be transformed into several other, equivalent
expressions involving other mathematical functions
>>> catalan(n).rewrite(binomial)
binomial(2*n, n)/(n + 1)
>>> catalan(n).rewrite(gamma)
4**n*gamma(n + 1/2)/(sqrt(pi)*gamma(n + 2))
>>> catalan(n).rewrite(hyper)
hyper((-n + 1, -n), (2,), 1)
For some non-integer values of n we can get closed form
expressions by rewriting in terms of gamma functions:
>>> catalan(Rational(1,2)).rewrite(gamma)
8/(3*pi)
We can differentiate the Catalan numbers C(n) interpreted as a
    continuous real function in n:
>>> diff(catalan(n), n)
(polygamma(0, n + 1/2) - polygamma(0, n + 2) + log(4))*catalan(n)
As a more advanced example consider the following ratio
between consecutive numbers:
>>> combsimp((catalan(n + 1)/catalan(n)).rewrite(binomial))
2*(2*n + 1)/(n + 2)
The Catalan numbers can be generalized to complex numbers:
>>> catalan(I).rewrite(gamma)
4**I*gamma(1/2 + I)/(sqrt(pi)*gamma(2 + I))
and evaluated with arbitrary precision:
>>> catalan(I).evalf(20)
0.39764993382373624267 - 0.020884341620842555705*I
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan_number
.. [2] http://mathworld.wolfram.com/CatalanNumber.html
.. [3] http://functions.wolfram.com/GammaBetaErf/CatalanNumber/
.. [4] http://geometer.org/mathcircles/catalan.pdf
See Also
========
bell, bernoulli, euler, fibonacci, harmonic, lucas
sympy.functions.combinatorial.factorials.binomial
"""
@classmethod
def eval(cls, n, evaluate=True):
if n.is_Integer and n.is_nonnegative:
return 4**n*C.gamma(n + S.Half)/(C.gamma(S.Half)*C.gamma(n + 2))
def fdiff(self, argindex=1):
n = self.args[0]
return catalan(n)*(C.polygamma(0, n + Rational(1, 2)) - C.polygamma(0, n + 2) + C.log(4))
def _eval_rewrite_as_binomial(self, n):
return C.binomial(2*n, n)/(n + 1)
def _eval_rewrite_as_gamma(self, n):
# The gamma function allows to generalize Catalan numbers to complex n
return 4**n*C.gamma(n + S.Half)/(C.gamma(S.Half)*C.gamma(n + 2))
def _eval_rewrite_as_hyper(self, n):
return C.hyper([1 - n, -n], [2], 1)
def _eval_evalf(self, prec):
return self.rewrite(C.gamma).evalf(prec)
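# Editor's addition (not in the original module): a hedged sketch of the
# Catalan evaluation and rewrite hooks defined above; the values follow the
# class docstring and the helper is illustrative only.
def _example_catalan_usage():
    from sympy import Symbol, binomial
    n = Symbol('n', integer=True)
    assert catalan(5) == 42
    assert catalan(n).rewrite(binomial) == binomial(2*n, n)/(n + 1)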
#######################################################################
###
### Functions for enumerating partitions, permutations and combinations
###
#######################################################################
class _MultisetHistogram(tuple):
pass
_N = -1
_ITEMS = -2
_M = slice(None, _ITEMS)
def _multiset_histogram(n):
"""Return tuple used in permutation and combination counting. Input
is a dictionary giving items with counts as values or a sequence of
items (which need not be sorted).
The data is stored in a class deriving from tuple so it is easily
recognized and so it can be converted easily to a list.
"""
if type(n) is dict: # item: count
if not all(isinstance(v, int) and v >= 0 for v in n.values()):
raise ValueError
tot = sum(n.values())
items = sum(1 for k in n if n[k] > 0)
return _MultisetHistogram([n[k] for k in n if n[k] > 0] + [items, tot])
else:
n = list(n)
s = set(n)
if len(s) == len(n):
n = [1]*len(n)
n.extend([len(n), len(n)])
return _MultisetHistogram(n)
m = dict(zip(s, range(len(s))))
d = dict(zip(range(len(s)), [0]*len(s)))
for i in n:
d[m[i]] += 1
return _multiset_histogram(d)
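# Editor's addition (not in the original module): a hedged sketch of the tuple
# layout produced by ``_multiset_histogram`` above.  Because the multiplicity
# order depends on set/dict iteration order, only order-independent properties
# are checked; the helper name is hypothetical and never called on import.
def _example_multiset_histogram():
    h = _multiset_histogram('aab')
    assert h[_N] == 3               # total number of items
    assert h[_ITEMS] == 2           # number of distinct items
    assert sorted(h[_M]) == [1, 2]  # multiplicities of the two distinct items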
def nP(n, k=None, replacement=False):
"""Return the number of permutations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all permutations of length 0
through the number of items represented by ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' permutations of 2 would
include 'aa', 'ab', 'ba' and 'bb'.) The multiplicity of elements in
``n`` is ignored when ``replacement`` is True but the total number
of elements is considered since no element can appear more times than
the number of elements in ``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nP
>>> from sympy.utilities.iterables import multiset_permutations, multiset
>>> nP(3, 2)
6
>>> nP('abc', 2) == nP(multiset('abc'), 2) == 6
True
>>> nP('aab', 2)
3
>>> nP([1, 2, 2], 2)
3
>>> [nP(3, i) for i in range(4)]
[1, 3, 6, 6]
>>> nP(3) == sum(_)
True
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nP('aabc', replacement=True)
121
>>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 9, 27, 81]
>>> sum(_)
121
References
==========
.. [1] http://en.wikipedia.org/wiki/Permutation
See Also
========
sympy.utilities.iterables.multiset_permutations
"""
try:
n = as_int(n)
except ValueError:
return Integer(_nP(_multiset_histogram(n), k, replacement))
return Integer(_nP(n, k, replacement))
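# Editor's addition (not in the original module): a hedged sketch contrasting
# ``nP`` on distinct items and on a multiset; the totals agree with the
# docstring above and the helper is illustrative only.
def _example_nP_usage():
    assert nP('abc', 2) == 6           # all items distinct
    assert nP('aab', 2) == 3           # 'aa', 'ab', 'ba'
    assert nP('aab') == 1 + 2 + 3 + 3  # lengths 0 through 3 summed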
@cacheit
def _nP(n, k=None, replacement=False):
from sympy.functions.combinatorial.factorials import factorial
from sympy.core.mul import prod
if k == 0:
return 1
if isinstance(n, SYMPY_INTS): # n different items
# assert n >= 0
if k is None:
return sum(_nP(n, i, replacement) for i in range(n + 1))
elif replacement:
return n**k
elif k > n:
return 0
elif k == n:
return factorial(k)
elif k == 1:
return n
else:
# assert k >= 0
return _product(n - k + 1, n)
elif isinstance(n, _MultisetHistogram):
if k is None:
return sum(_nP(n, i, replacement) for i in range(n[_N] + 1))
elif replacement:
return n[_ITEMS]**k
elif k == n[_N]:
return factorial(k)/prod([factorial(i) for i in n[_M] if i > 1])
elif k > n[_N]:
return 0
elif k == 1:
return n[_ITEMS]
else:
# assert k >= 0
tot = 0
n = list(n)
for i in range(len(n[_M])):
if not n[i]:
continue
n[_N] -= 1
if n[i] == 1:
n[i] = 0
n[_ITEMS] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[_ITEMS] += 1
n[i] = 1
else:
n[i] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[i] += 1
n[_N] += 1
return tot
@cacheit
def _AOP_product(n):
"""for n = (m1, m2, .., mk) return the coefficients of the polynomial,
prod(sum(x**i for i in range(nj + 1)) for nj in n); i.e. the coefficients
    of the product of AOPs (all-one polynomials) of orders given in n. The
resulting coefficient corresponding to x**r is the number of r-length
combinations of sum(n) elements with multiplicities given in n.
The coefficients are given as a default dictionary (so if a query is made
for a key that is not present, 0 will be returned).
Examples
========
>>> from sympy.functions.combinatorial.numbers import _AOP_product
>>> from sympy.abc import x
>>> n = (2, 2, 3) # e.g. aabbccc
>>> prod = ((x**2 + x + 1)*(x**2 + x + 1)*(x**3 + x**2 + x + 1)).expand()
>>> c = _AOP_product(n); dict(c)
{0: 1, 1: 3, 2: 6, 3: 8, 4: 8, 5: 6, 6: 3, 7: 1}
>>> [c[i] for i in range(8)] == [prod.coeff(x, i) for i in range(8)]
True
The generating poly used here is the same as that listed in
http://tinyurl.com/cep849r, but in a refactored form.
"""
from collections import defaultdict
n = list(n)
ord = sum(n)
need = (ord + 2)//2
rv = [1]*(n.pop() + 1)
rv.extend([0]*(need - len(rv)))
rv = rv[:need]
while n:
ni = n.pop()
N = ni + 1
was = rv[:]
for i in range(1, min(N, len(rv))):
rv[i] += rv[i - 1]
for i in range(N, need):
rv[i] += rv[i - 1] - was[i - N]
rev = list(reversed(rv))
if ord % 2:
rv = rv + rev
else:
rv[-1:] = rev
d = defaultdict(int)
for i in range(len(rv)):
d[i] = rv[i]
return d
def nC(n, k=None, replacement=False):
"""Return the number of combinations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all combinations of length 0
through the number of items represented in ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' sets of 2 would include 'aa',
'ab', and 'bb'.) The multiplicity of elements in ``n`` is ignored when
``replacement`` is True but the total number of elements is considered
since no element can appear more times than the number of elements in
``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nC
>>> from sympy.utilities.iterables import multiset_combinations
>>> nC(3, 2)
3
>>> nC('abc', 2)
3
>>> nC('aab', 2)
2
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nC('aabc', replacement=True)
35
>>> [len(list(multiset_combinations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 6, 10, 15]
>>> sum(_)
35
If there are ``k`` items with multiplicities ``m_1, m_2, ..., m_k``
    then the total of all combinations of any length is the
product, ``(m_1 + 1)*(m_2 + 1)*...*(m_k + 1)``. When the multiplicity
of each item is 1 (i.e., k unique items) then there are 2**k
combinations. For example, if there are 4 unique items, the total number
of combinations is 16:
>>> sum(nC(4, i) for i in range(5))
16
References
==========
.. [1] http://en.wikipedia.org/wiki/Combination
.. [2] http://tinyurl.com/cep849r
See Also
========
sympy.utilities.iterables.multiset_combinations
"""
from sympy.functions.combinatorial.factorials import binomial
from sympy.core.mul import prod
if isinstance(n, SYMPY_INTS):
if k is None:
if not replacement:
return 2**n
return sum(nC(n, i, replacement) for i in range(n + 1))
assert k >= 0
if replacement:
return binomial(n + k - 1, k)
return binomial(n, k)
if isinstance(n, _MultisetHistogram):
N = n[_N]
if k is None:
if not replacement:
return prod(m + 1 for m in n[_M])
return sum(nC(n, i, replacement) for i in range(N + 1))
elif replacement:
return nC(n[_ITEMS], k, replacement)
# assert k >= 0
elif k in (1, N - 1):
return n[_ITEMS]
elif k in (0, N):
return 1
return _AOP_product(tuple(n[_M]))[k]
else:
return nC(_multiset_histogram(n), k, replacement)
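# Editor's addition (not in the original module): a hedged sketch of the
# multiset total discussed in the ``nC`` docstring above -- the number of
# sub-multisets of 'aabbccc' is (2 + 1)*(2 + 1)*(3 + 1); illustrative only.
def _example_nC_usage():
    assert nC('aabbccc') == 36
    assert sum(nC('aabbccc', i) for i in range(8)) == 36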
@cacheit
def _stirling1(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if n == k:
return S.One
elif k == 1:
return factorial(n1)
elif k == n1:
return C.binomial(n, 2)
elif k == n - 2:
return (3*n - 1)*C.binomial(n, 3)/4
elif k == n - 3:
return C.binomial(n, 2)*C.binomial(n, 4)
# general recurrence
return n1*_stirling1(n1, k) + _stirling1(n1, k - 1)
@cacheit
def _stirling2(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if k == n1:
return C.binomial(n, 2)
elif k == 2:
return 2**n1 - 1
# general recurrence
return k*_stirling2(n1, k) + _stirling2(n1, k - 1)
def stirling(n, k, d=None, kind=2, signed=False):
"""Return Stirling number S(n, k) of the first or second (default) kind.
The sum of all Stirling numbers of the second kind for k = 1
through n is bell(n). The recurrence relationship for these numbers
is::
{0} {n} {0} {n + 1} {n} { n }
{ } = 1; { } = { } = 0; { } = j*{ } + { }
{0} {0} {k} { k } {k} {k - 1}
where ``j`` is::
``n`` for Stirling numbers of the first kind
``-n`` for signed Stirling numbers of the first kind
``k`` for Stirling numbers of the second kind
The first kind of Stirling number counts the number of permutations of
``n`` distinct items that have ``k`` cycles; the second kind counts the
ways in which ``n`` distinct items can be partitioned into ``k`` parts.
If ``d`` is given, the "reduced Stirling number of the second kind" is
returned: ``S^{d}(n, k) = S(n - d + 1, k - d + 1)`` with ``n >= k >= d``.
(This counts the ways to partition ``n`` consecutive integers into
``k`` groups with no pairwise difference less than ``d``. See example
below.)
To obtain the signed Stirling numbers of the first kind, use keyword
``signed=True``. Using this keyword automatically sets ``kind`` to 1.
Examples
========
>>> from sympy.functions.combinatorial.numbers import stirling, bell
>>> from sympy.combinatorics import Permutation
>>> from sympy.utilities.iterables import multiset_partitions, permutations
First kind (unsigned by default):
>>> [stirling(6, i, kind=1) for i in range(7)]
[0, 120, 274, 225, 85, 15, 1]
>>> perms = list(permutations(range(4)))
>>> [sum(Permutation(p).cycles == i for p in perms) for i in range(5)]
[0, 6, 11, 6, 1]
>>> [stirling(4, i, kind=1) for i in range(5)]
[0, 6, 11, 6, 1]
First kind (signed):
>>> [stirling(4, i, signed=True) for i in range(5)]
[0, -6, 11, -6, 1]
Second kind:
>>> [stirling(10, i) for i in range(12)]
[0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1, 0]
>>> sum(_) == bell(10)
True
>>> len(list(multiset_partitions(range(4), 2))) == stirling(4, 2)
True
Reduced second kind:
>>> from sympy import subsets, oo
>>> def delta(p):
... if len(p) == 1:
... return oo
... return min(abs(i[0] - i[1]) for i in subsets(p, 2))
>>> parts = multiset_partitions(range(5), 3)
>>> d = 2
>>> sum(1 for p in parts if all(delta(i) >= d for i in p))
7
>>> stirling(5, 3, 2)
7
References
==========
.. [1] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
.. [2] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
See Also
========
sympy.utilities.iterables.multiset_partitions
"""
# TODO: make this a class like bell()
n = as_int(n)
k = as_int(k)
if n < 0:
raise ValueError('n must be nonnegative')
if k > n:
return S.Zero
if d:
# assert k >= d
# kind is ignored -- only kind=2 is supported
return _stirling2(n - d + 1, k - d + 1)
elif signed:
# kind is ignored -- only kind=1 is supported
return (-1)**(n - k)*_stirling1(n, k)
if kind == 1:
return _stirling1(n, k)
elif kind == 2:
return _stirling2(n, k)
else:
        raise ValueError('kind must be 1 or 2, not %s' % kind)
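# Editor's addition (not in the original module): a hedged sketch relating the
# two kinds of Stirling numbers computed above and the Bell-number identity
# from the docstring; the helper is illustrative and never called on import.
def _example_stirling_usage():
    assert stirling(4, 2) == 7                 # second kind (default)
    assert stirling(4, 2, kind=1) == 11        # unsigned first kind
    assert stirling(4, 2, signed=True) == 11   # (-1)**(n - k)*_stirling1(n, k)
    assert sum(stirling(4, i) for i in range(5)) == bell(4) == 15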
@cacheit
def _nT(n, k):
"""Return the partitions of ``n`` items into ``k`` parts. This
is used by ``nT`` for the case when ``n`` is an integer."""
if k == 0:
return 1 if k == n else 0
return sum(_nT(n - k, j) for j in range(min(k, n - k) + 1))
def nT(n, k=None):
"""Return the number of ``k``-sized partitions of ``n`` items.
Possible values for ``n``::
integer - ``n`` identical items
sequence - converted to a multiset internally
multiset - {element: multiplicity}
    Note: the convention for ``nT`` is different from that of ``nC`` and ``nP`` in that
here an integer indicates ``n`` *identical* items instead of a set of
    length ``n``; this is in keeping with the ``partitions`` function which
treats its integer-``n`` input like a list of ``n`` 1s. One can use
``range(n)`` for ``n`` to indicate ``n`` distinct items.
If ``k`` is None then the total number of ways to partition the elements
represented in ``n`` will be returned.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nT
Partitions of the given multiset:
>>> [nT('aabbc', i) for i in range(1, 7)]
[1, 8, 11, 5, 1, 0]
>>> nT('aabbc') == sum(_)
True
(TODO The following can be activated with >>> when
taocp_multiset_permutation is in place.)
>> [nT("mississippi", i) for i in range(1, 12)]
[1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1]
Partitions when all items are identical:
>>> [nT(5, i) for i in range(1, 6)]
[1, 2, 2, 1, 1]
>>> nT('1'*5) == sum(_)
True
When all items are different:
>>> [nT(range(5), i) for i in range(1, 6)]
[1, 15, 25, 10, 1]
>>> nT(range(5)) == sum(_)
True
References
==========
.. [1] http://undergraduate.csse.uwa.edu.au/units/CITS7209/partition.pdf
See Also
========
sympy.utilities.iterables.partitions
sympy.utilities.iterables.multiset_partitions
"""
from sympy.utilities.iterables import multiset_partitions
if isinstance(n, SYMPY_INTS):
# assert n >= 0
# all the same
if k is None:
return sum(_nT(n, k) for k in range(1, n + 1))
return _nT(n, k)
if not isinstance(n, _MultisetHistogram):
try:
# if n contains hashable items there is some
# quick handling that can be done
u = len(set(n))
if u == 1:
return nT(len(n), k)
elif u == len(n):
n = range(u)
raise TypeError
except TypeError:
n = _multiset_histogram(n)
N = n[_N]
if k is None and N == 1:
return 1
if k in (1, N):
return 1
if k == 2 or N == 2 and k is None:
m, r = divmod(N, 2)
rv = sum(nC(n, i) for i in range(1, m + 1))
if not r:
rv -= nC(n, m)//2
if k is None:
rv += 1 # for k == 1
return rv
if N == n[_ITEMS]:
# all distinct
if k is None:
return bell(N)
return stirling(N, k)
if k is None:
return sum(nT(n, k) for k in range(1, N + 1))
tot = 0
for p in multiset_partitions(
[i for i, j in enumerate(n[_M]) for ii in range(j)]):
tot += len(p) == k
return tot
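# Editor's addition (not in the original module): a hedged sketch of the three
# input conventions handled by ``nT`` above; the values follow the docstring
# and the helper is illustrative only.
def _example_nT_usage():
    assert nT(5) == 7                     # integer n means 5 identical items
    assert nT(range(5)) == bell(5) == 52  # all-distinct items give bell(5)
    assert nT('aabbc', 2) == 8            # 2-part partitions of a multiset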
| gpl-3.0 | -7,633,730,731,319,739,000 | 29.53915 | 103 | 0.497644 | false |
semente/django-hashtags | hashtags/views.py | 1 | 3967 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Guilherme Gondim and contributors
#
# This file is part of Django Hashtags.
#
# Django Hashtags is free software under terms of the GNU Lesser
# General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README for copying conditions.
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404, HttpResponse
from django.template import loader, RequestContext
from django.views.generic import list_detail
from hashtags.models import Hashtag, HashtaggedItem
def hashtag_index(request, *args, **kwargs):
"""
A thin wrapper around ``django.views.generic.list_detail.object_list``.
You don't need provide the ``queryset`` if you want.
    The ``template_object_name`` by default is ``'hashtag'``. This means that the
context variable ``object_list`` will be renamed to ``hashtag_list``.
**Template name**:
If ``template_name`` isn't specified, this view will use the template
``hashtags/hashtag_index.html`` by default.
"""
if 'queryset' not in kwargs:
kwargs['queryset'] = Hashtag.objects.all()
if 'template_name' not in kwargs:
kwargs['template_name'] = 'hashtags/hashtag_index.html'
if 'template_object_name' not in kwargs:
kwargs['template_object_name'] = 'hashtag'
return list_detail.object_list(request, *args, **kwargs)
def hashtagged_item_list(request, hashtag, paginate_by=None, page=None,
allow_empty=True, template_loader=loader,
template_name="hashtags/hashtagged_item_list.html",
extra_context={}, context_processors=None,
template_object_name='hashtagged_item_list',
mimetype=None):
"""
A page representing a list of objects hastagged with ``hashtag``.
    Works like ``django.views.generic.list_detail.object_list``.
Templates: ``hashtags/hashtagged_item_list.html``
Context:
hashtag
The hashtag object in question
hashtagged_item_list
            The list of objects hashtagged with ``hashtag``
paginator
An instance of ``django.core.paginator.Paginator``
page_obj
An instance of ``django.core.paginator.Page``
"""
try:
hashtag = Hashtag.objects.get(name=hashtag)
except ObjectDoesNotExist:
raise Http404("Hashtag %s doesn't exist." % hashtag)
queryset = HashtaggedItem.objects.filter(hashtag=hashtag)
if paginate_by:
paginator = Paginator(queryset, paginate_by,
allow_empty_first_page=allow_empty)
if not page:
page = request.GET.get('page', 1)
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
# Page is not 'last', nor can it be converted to an int.
raise Http404
try:
page_obj = paginator.page(page_number)
except InvalidPage:
raise Http404
c = RequestContext(request, {
'hashtag': hashtag,
template_object_name: queryset,
'paginator': paginator,
'page_obj': page_obj,
}, context_processors)
else:
c = RequestContext(request, {
'hashtag': hashtag,
template_object_name: queryset,
'paginator': None,
'page_obj': None,
}, context_processors)
if not allow_empty and len(queryset) == 0:
raise Http404
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
t = template_loader.get_template(template_name)
return HttpResponse(t.render(c), mimetype=mimetype)
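# Editor's addition (not part of the original app): a hedged sketch of how the
# two views above might be wired into a urls.py for the Django 1.x series this
# code targets.  The regexes, URL names and ``paginate_by`` value are
# assumptions chosen for illustration, not part of Django Hashtags itself.
def _example_urlpatterns():
    from django.conf.urls.defaults import patterns, url
    return patterns('',
        url(r'^hashtags/$', hashtag_index, name='hashtag_index'),
        url(r'^hashtags/(?P<hashtag>[-\w]+)/$', hashtagged_item_list,
            {'paginate_by': 20}, name='hashtagged_item_list'),
    )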
| lgpl-3.0 | -5,713,242,525,883,983,000 | 37.144231 | 80 | 0.617595 | false |
cheesechoi/Triton | cheese/test/cheese_getModelcheck.jle.jg.py | 1 | 5145 | from triton import *
import smt2lib
"""
Address 0x400547 progress
[+] Address <cmp argv[1][0] 0x41>
{'SymVar_0': "0x50, 'P'"}
{'SymVar_0': "0x60, '`'"}
{'SymVar_0': "0x5a, 'Z'"}
{'SymVar_0': "0x4a, 'J'"}
{'SymVar_0': "0x42, 'B'"}
{'SymVar_0': "0x62, 'b'"}
{'SymVar_0': "0x6a, 'j'"}
{'SymVar_0': "0x68, 'h'"}
{'SymVar_0': "0x69, 'i'"}
{'SymVar_0': "0x49, 'I'"}
[+] Address <cmp argv[1][0] 0x59>
{'SymVar_0': "0x50, 'P'"}
{'SymVar_0': "0x59, 'Y'"}
{'SymVar_0': "0x58, 'X'"}
{'SymVar_0': "0x48, 'H'"}
{'SymVar_0': "0x44, 'D'"}
{'SymVar_0': "0x4c, 'L'"}
{'SymVar_0': "0x54, 'T'"}
{'SymVar_0': "0x49, 'I'"}
{'SymVar_0': "0x4d, 'M'"}
{'SymVar_0': "0x4f, 'O'"}
nope!
"""
expr = str()
listExpr = list()
def sbefore(instruction):
concretizeAllMem()
concretizeAllReg()
return
def cafter(instruction):
# evaluateAST Test
if 0x400551 == instruction.getAddress(): # jle
bad = list()
regs = getRegs()
for reg, data in regs.items():
#print getRegName(reg)
if 'rip' != getRegName(reg):
continue
cvalue = data['concreteValue']
seid = data['symbolicExpr']
#print "seid %d"%seid
if seid == IDREF.MISC.UNSET:
#print "unset %d"%IDREF.MISC.UNSET
continue
#print "IDREF.MISC.UNSET %d"%IDREF.MISC.UNSET
#print "test:%s %s"%(getRegName(reg), data)
#print getSymExpr(seid)
print getSymExpr(seid).getAst()
expr = getFullExpression(getSymExpr(seid).getAst())
print "excute evalueateAST(expr) --> evalueateAST(%s)"%expr
svalue = evaluateAST(expr)
print svalue
if cvalue != svalue:
bad.append({
'reg':getRegName(reg),
'svalue': svalue,
'cvalue': cvalue,
'expr':getSymExpr(seid).getAst()
})
if len(instruction.getSymbolicExpressions()) == 0:
print "[??] %#x: %s"%(instruction.getAddress(), instruction.getDisassembly())
return
if not bad:
print "[OK] %#x: %s"%(instruction.getAddress(), instruction.getDisassembly())
else:
print "### [KO] ### %#x: %s"%(instruction.getAddress(), instruction.getDisassembly())
for w in bad:
print " Register : %s"%(w['reg'])
print " Symbolic Value : %016x"%(w['svalue'])
print " Concrete Value : %016x"%(w['cvalue'])
print " Expression : %s"%(w['expr'])
return
# 0x0000000000400547 <+26>: movzx eax,BYTE PTR [rax]
if 0x400547 == instruction.getAddress():# == 0x400547:
print "Address 0x400547 progress"
raxId = getRegSymbolicID(IDREF.REG.RAX)
print getSymExpr(raxId)
#convertExprToSymVar(raxId, 8) #only 8bit
# 0x000000000040054d <+32>: cmp BYTE PTR [rbp-0x1],0x41
if instruction.getAddress() == 0x40054d:
print '[+] Address <cmp argv[1][0] 0x41>'
# WE DONT WANT JUMP
# 0x0000000000400551 <+36>: jle 0x40056a <main+61>
# jump if less or equal . ZF = 1 or SF <> OF.
# ZF = 0 and SF == OF
zfId = getRegSymbolicID(IDREF.FLAG.ZF)
zfExpr = getFullExpression(getSymExpr(zfId).getAst())
sfId = getRegSymbolicID(IDREF.FLAG.SF)
sfExpr = getFullExpression(getSymExpr(sfId).getAst())
ofId = getRegSymbolicID(IDREF.FLAG.OF)
ofExpr = getFullExpression(getSymExpr(ofId).getAst())
listExpr.append(smt2lib.smtAssert(smt2lib.equal(zfExpr, smt2lib.bvfalse())))
listExpr.append(smt2lib.smtAssert(smt2lib.equal(sfExpr, ofExpr)))
exprComp = smt2lib.compound(listExpr)
models = getModels(exprComp, 10)
for model in models:
print {k: "0x%x, '%c'" % (v, v) for k, v in model.items()}
raw_input()
#0x0000000000400553 <+38>: cmp BYTE PTR [rbp-0x1],0x59
if instruction.getAddress() == 0x400553:
print '[+] Address <cmp argv[1][0] 0x59>'
# WE DONT WANT JUMP, TOO.
# 0x0000000000400557 <+42>: jg 0x40056a <main+61>
# jmp if greater. ZF = 0 and SF = OF
# ZF = 1 or SF <> OF
zfId = getRegSymbolicID(IDREF.FLAG.ZF)
zfExpr = getFullExpression(getSymExpr(zfId).getAst())
sfId = getRegSymbolicID(IDREF.FLAG.SF)
sfExpr = getFullExpression(getSymExpr(sfId).getAst())
ofId = getRegSymbolicID(IDREF.FLAG.OF)
ofExpr = getFullExpression(getSymExpr(ofId).getAst())
exprJgNotJump = smt2lib.equal(smt2lib.bvor(smt2lib.bvxor(sfExpr,ofExpr), zfExpr), smt2lib.bvtrue())
listExpr.append( smt2lib.smtAssert(exprJgNotJump) )
exprComp = smt2lib.compound(listExpr)
models = getModels(exprComp, 10)
for model in models:
print {k: "0x%x, '%c'" % (v, v) for k, v in model.items()}
raw_input()
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
| lgpl-3.0 | 5,614,911,014,887,900,000 | 31.563291 | 100 | 0.554325 | false |
dag/genshi | genshi/template/markup.py | 1 | 16464 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Markup templating engine."""
from itertools import chain
from genshi.core import Attrs, Markup, Namespace, Stream, StreamEventKind
from genshi.core import START, END, START_NS, END_NS, TEXT, PI, COMMENT
from genshi.input import XMLParser
from genshi.template.base import BadDirectiveError, Template, \
TemplateSyntaxError, _apply_directives, \
EXEC, INCLUDE, SUB
from genshi.template.eval import Suite
from genshi.template.interpolation import interpolate
from genshi.template.directives import *
from genshi.template.text import NewTextTemplate
__all__ = ['MarkupTemplate']
__docformat__ = 'restructuredtext en'
class MarkupTemplate(Template):
"""Implementation of the template language for XML-based templates.
>>> tmpl = MarkupTemplate('''<ul xmlns:py="http://genshi.edgewall.org/">
... <li py:for="item in items">${item}</li>
... </ul>''')
>>> print(tmpl.generate(items=[1, 2, 3]))
<ul>
<li>1</li><li>2</li><li>3</li>
</ul>
"""
DIRECTIVE_NAMESPACE = 'http://genshi.edgewall.org/'
XINCLUDE_NAMESPACE = 'http://www.w3.org/2001/XInclude'
directives = [('def', DefDirective),
('match', MatchDirective),
('when', WhenDirective),
('otherwise', OtherwiseDirective),
('for', ForDirective),
('if', IfDirective),
('choose', ChooseDirective),
('with', WithDirective),
('replace', ReplaceDirective),
('content', ContentDirective),
('attrs', AttrsDirective),
('strip', StripDirective)]
serializer = 'xml'
_number_conv = Markup
def __init__(self, source, filepath=None, filename=None, loader=None,
encoding=None, lookup='strict', allow_exec=True):
Template.__init__(self, source, filepath=filepath, filename=filename,
loader=loader, encoding=encoding, lookup=lookup,
allow_exec=allow_exec)
self.add_directives(self.DIRECTIVE_NAMESPACE, self)
def _init_filters(self):
Template._init_filters(self)
# Make sure the include filter comes after the match filter
self.filters.remove(self._include)
self.filters += [self._match, self._include]
def _parse(self, source, encoding):
if not isinstance(source, Stream):
source = XMLParser(source, filename=self.filename,
encoding=encoding)
stream = []
for kind, data, pos in source:
if kind is TEXT:
for kind, data, pos in interpolate(data, self.filepath, pos[1],
pos[2], lookup=self.lookup):
stream.append((kind, data, pos))
elif kind is PI and data[0] == 'python':
if not self.allow_exec:
raise TemplateSyntaxError('Python code blocks not allowed',
self.filepath, *pos[1:])
try:
suite = Suite(data[1], self.filepath, pos[1],
lookup=self.lookup)
except SyntaxError, err:
raise TemplateSyntaxError(err, self.filepath,
pos[1] + (err.lineno or 1) - 1,
pos[2] + (err.offset or 0))
stream.append((EXEC, suite, pos))
elif kind is COMMENT:
if not data.lstrip().startswith('!'):
stream.append((kind, data, pos))
else:
stream.append((kind, data, pos))
return stream
def _extract_directives(self, stream, namespace, factory):
depth = 0
dirmap = {} # temporary mapping of directives to elements
new_stream = []
ns_prefix = {} # namespace prefixes in use
for kind, data, pos in stream:
if kind is START:
tag, attrs = data
directives = []
strip = False
if tag.namespace == namespace:
cls = factory.get_directive(tag.localname)
if cls is None:
raise BadDirectiveError(tag.localname,
self.filepath, pos[1])
args = dict([(name.localname, value) for name, value
in attrs if not name.namespace])
directives.append((factory.get_directive_index(cls), cls,
args, ns_prefix.copy(), pos))
strip = True
new_attrs = []
for name, value in attrs:
if name.namespace == namespace:
cls = factory.get_directive(name.localname)
if cls is None:
raise BadDirectiveError(name.localname,
self.filepath, pos[1])
if type(value) is list and len(value) == 1:
value = value[0][1]
directives.append((factory.get_directive_index(cls),
cls, value, ns_prefix.copy(), pos))
else:
new_attrs.append((name, value))
new_attrs = Attrs(new_attrs)
if directives:
directives.sort()
dirmap[(depth, tag)] = (directives, len(new_stream),
strip)
new_stream.append((kind, (tag, new_attrs), pos))
depth += 1
elif kind is END:
depth -= 1
new_stream.append((kind, data, pos))
                # If there were directive attributes on the corresponding
                # start tag, move the events in between into a "subprogram"
if (depth, data) in dirmap:
directives, offset, strip = dirmap.pop((depth, data))
substream = new_stream[offset:]
if strip:
substream = substream[1:-1]
new_stream[offset:] = [
(SUB, (directives, substream), pos)
]
elif kind is SUB:
directives, substream = data
substream = self._extract_directives(substream, namespace,
factory)
if len(substream) == 1 and substream[0][0] is SUB:
added_directives, substream = substream[0][1]
directives += added_directives
new_stream.append((kind, (directives, substream), pos))
elif kind is START_NS:
# Strip out the namespace declaration for template
# directives
prefix, uri = data
ns_prefix[prefix] = uri
if uri != namespace:
new_stream.append((kind, data, pos))
elif kind is END_NS:
uri = ns_prefix.pop(data, None)
if uri and uri != namespace:
new_stream.append((kind, data, pos))
else:
new_stream.append((kind, data, pos))
return new_stream
def _extract_includes(self, stream):
streams = [[]] # stacked lists of events of the "compiled" template
prefixes = {}
fallbacks = []
includes = []
xinclude_ns = Namespace(self.XINCLUDE_NAMESPACE)
for kind, data, pos in stream:
stream = streams[-1]
if kind is START:
# Record any directive attributes in start tags
tag, attrs = data
if tag in xinclude_ns:
if tag.localname == 'include':
include_href = attrs.get('href')
if not include_href:
raise TemplateSyntaxError('Include misses required '
'attribute "href"',
self.filepath, *pos[1:])
includes.append((include_href, attrs.get('parse')))
streams.append([])
elif tag.localname == 'fallback':
streams.append([])
fallbacks.append(streams[-1])
else:
stream.append((kind, (tag, attrs), pos))
elif kind is END:
if fallbacks and data == xinclude_ns['fallback']:
assert streams.pop() is fallbacks[-1]
elif data == xinclude_ns['include']:
fallback = None
if len(fallbacks) == len(includes):
fallback = fallbacks.pop()
streams.pop() # discard anything between the include tags
# and the fallback element
stream = streams[-1]
href, parse = includes.pop()
try:
cls = {
'xml': MarkupTemplate,
'text': NewTextTemplate
}.get(parse) or self.__class__
except KeyError:
raise TemplateSyntaxError('Invalid value for "parse" '
'attribute of include',
self.filepath, *pos[1:])
stream.append((INCLUDE, (href, cls, fallback), pos))
else:
stream.append((kind, data, pos))
elif kind is START_NS and data[1] == xinclude_ns:
# Strip out the XInclude namespace
prefixes[data[0]] = data[1]
elif kind is END_NS and data in prefixes:
prefixes.pop(data)
else:
stream.append((kind, data, pos))
assert len(streams) == 1
return streams[0]
def _interpolate_attrs(self, stream):
for kind, data, pos in stream:
if kind is START:
# Record any directive attributes in start tags
tag, attrs = data
new_attrs = []
for name, value in attrs:
if value:
value = list(interpolate(value, self.filepath, pos[1],
pos[2], lookup=self.lookup))
if len(value) == 1 and value[0][0] is TEXT:
value = value[0][1]
new_attrs.append((name, value))
data = tag, Attrs(new_attrs)
yield kind, data, pos
def _prepare(self, stream):
return Template._prepare(self,
self._extract_includes(self._interpolate_attrs(stream))
)
def add_directives(self, namespace, factory):
"""Register a custom `DirectiveFactory` for a given namespace.
:param namespace: the namespace URI
:type namespace: `basestring`
:param factory: the directive factory to register
:type factory: `DirectiveFactory`
:since: version 0.6
"""
assert not self._prepared, 'Too late for adding directives, ' \
'template already prepared'
self._stream = self._extract_directives(self._stream, namespace,
factory)
def _match(self, stream, ctxt, start=0, end=None, **vars):
"""Internal stream filter that applies any defined match templates
to the stream.
"""
match_templates = ctxt._match_templates
def _strip(stream, append):
depth = 1
next = stream.next
while 1:
event = next()
if event[0] is START:
depth += 1
elif event[0] is END:
depth -= 1
if depth > 0:
yield event
else:
append(event)
break
for event in stream:
# We (currently) only care about start and end events for matching
# We might care about namespace events in the future, though
if not match_templates or (event[0] is not START and
event[0] is not END):
yield event
continue
for idx, (test, path, template, hints, namespaces, directives) \
in enumerate(match_templates):
if idx < start or end is not None and idx >= end:
continue
if test(event, namespaces, ctxt) is True:
if 'match_once' in hints:
del match_templates[idx]
idx -= 1
# Let the remaining match templates know about the event so
# they get a chance to update their internal state
for test in [mt[0] for mt in match_templates[idx + 1:]]:
test(event, namespaces, ctxt, updateonly=True)
# Consume and store all events until an end event
# corresponding to this start event is encountered
pre_end = idx + 1
if 'match_once' not in hints and 'not_recursive' in hints:
pre_end -= 1
tail = []
inner = _strip(stream, tail.append)
if pre_end > 0:
inner = self._match(inner, ctxt, start=start,
end=pre_end, **vars)
content = self._include(chain([event], inner, tail), ctxt)
if 'not_buffered' not in hints:
content = list(content)
content = Stream(content)
# Make the select() function available in the body of the
# match template
selected = [False]
def select(path):
selected[0] = True
return content.select(path, namespaces, ctxt)
vars = dict(select=select)
# Recursively process the output
template = _apply_directives(template, directives, ctxt,
vars)
for event in self._match(self._flatten(template, ctxt,
**vars),
ctxt, start=idx + 1, **vars):
yield event
# If the match template did not actually call select to
# consume the matched stream, the original events need to
# be consumed here or they'll get appended to the output
if not selected[0]:
for event in content:
pass
# Let the remaining match templates know about the last
# event in the matched content, so they can update their
# internal state accordingly
for test in [mt[0] for mt in match_templates[idx + 1:]]:
test(tail[0], namespaces, ctxt, updateonly=True)
break
else: # no matches
yield event
| bsd-3-clause | 529,692,708,747,131,260 | 40.471033 | 80 | 0.476312 | false |
iulian787/spack | lib/spack/spack/test/config.py | 2 | 33282 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import collections
import getpass
import tempfile
from six import StringIO
from llnl.util.filesystem import touch, mkdirp
import pytest
import spack.paths
import spack.config
import spack.main
import spack.schema.compilers
import spack.schema.config
import spack.schema.env
import spack.schema.packages
import spack.schema.mirrors
import spack.schema.repos
import spack.util.spack_yaml as syaml
import spack.util.path as spack_path
# sample config data
config_low = {
'config': {
'install_tree': {'root': 'install_tree_path'},
'build_stage': ['path1', 'path2', 'path3']}}
config_override_all = {
'config:': {
'install_tree:': {'root': 'override_all'}}}
config_override_key = {
'config': {
'install_tree:': {'root': 'override_key'}}}
config_merge_list = {
'config': {
'build_stage': ['patha', 'pathb']}}
config_override_list = {
'config': {
'build_stage:': ['pathd', 'pathe']}}
config_merge_dict = {
'config': {
'info': {
'a': 3,
'b': 4}}}
config_override_dict = {
'config': {
'info:': {
'a': 7,
'c': 9}}}
@pytest.fixture()
def write_config_file(tmpdir):
"""Returns a function that writes a config file."""
def _write(config, data, scope):
config_yaml = tmpdir.join(scope, config + '.yaml')
config_yaml.ensure()
with config_yaml.open('w') as f:
syaml.dump_config(data, f)
return _write
def check_compiler_config(comps, *compiler_names):
"""Check that named compilers in comps match Spack's config."""
config = spack.config.get('compilers')
compiler_list = ['cc', 'cxx', 'f77', 'fc']
flag_list = ['cflags', 'cxxflags', 'fflags', 'cppflags',
'ldflags', 'ldlibs']
param_list = ['modules', 'paths', 'spec', 'operating_system']
for compiler in config:
conf = compiler['compiler']
if conf['spec'] in compiler_names:
comp = next((c['compiler'] for c in comps if
c['compiler']['spec'] == conf['spec']), None)
if not comp:
raise ValueError('Bad config spec')
for p in param_list:
assert conf[p] == comp[p]
for f in flag_list:
expected = comp.get('flags', {}).get(f, None)
actual = conf.get('flags', {}).get(f, None)
assert expected == actual
for c in compiler_list:
expected = comp['paths'][c]
actual = conf['paths'][c]
assert expected == actual
#
# Some sample compiler config data and tests.
#
a_comps = {
'compilers': [
{'compiler': {
'paths': {
"cc": "/gcc473",
"cxx": "/g++473",
"f77": None,
"fc": None
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/gcc450",
"cxx": "/g++450",
"f77": 'gfortran',
"fc": 'gfortran'
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/gcc422",
"cxx": "/g++422",
"f77": 'gfortran',
"fc": 'gfortran'
},
'flags': {
"cppflags": "-O0 -fpic",
"fflags": "-f77",
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "<overwritten>",
"cxx": "<overwritten>",
"f77": '<overwritten>',
"fc": '<overwritten>'},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}}
]
}
b_comps = {
'compilers': [
{'compiler': {
'paths': {
"cc": "/icc100",
"cxx": "/icp100",
"f77": None,
"fc": None
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/icc111",
"cxx": "/icp111",
"f77": 'ifort',
"fc": 'ifort'
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/icc123",
"cxx": "/icp123",
"f77": 'ifort',
"fc": 'ifort'
},
'flags': {
"cppflags": "-O3",
"fflags": "-f77rtl",
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "<overwritten>",
"cxx": "<overwritten>",
"f77": '<overwritten>',
"fc": '<overwritten>'},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}}
]
}
@pytest.fixture()
def compiler_specs():
"""Returns a couple of compiler specs needed for the tests"""
a = [ac['compiler']['spec'] for ac in a_comps['compilers']]
b = [bc['compiler']['spec'] for bc in b_comps['compilers']]
CompilerSpecs = collections.namedtuple('CompilerSpecs', ['a', 'b'])
return CompilerSpecs(a=a, b=b)
def test_write_key_in_memory(mock_low_high_config, compiler_specs):
# Write b_comps "on top of" a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='high')
# Make sure the config looks how we expect.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
def test_write_key_to_disk(mock_low_high_config, compiler_specs):
# Write b_comps "on top of" a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='high')
# Clear caches so we're forced to read from disk.
spack.config.config.clear_caches()
# Same check again, to ensure consistency.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
def test_write_to_same_priority_file(mock_low_high_config, compiler_specs):
# Write b_comps in the same file as a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='low')
# Clear caches so we're forced to read from disk.
spack.config.config.clear_caches()
# Same check again, to ensure consistency.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
#
# Sample repo data and tests
#
repos_low = {'repos': ["/some/path"]}
repos_high = {'repos': ["/some/other/path"]}
# repos
def test_write_list_in_memory(mock_low_high_config):
spack.config.set('repos', repos_low['repos'], scope='low')
spack.config.set('repos', repos_high['repos'], scope='high')
config = spack.config.get('repos')
assert config == repos_high['repos'] + repos_low['repos']
def test_substitute_config_variables(mock_low_high_config):
prefix = spack.paths.prefix.lstrip('/')
assert os.path.join(
'/foo/bar/baz', prefix
) == spack_path.canonicalize_path('/foo/bar/baz/$spack')
assert os.path.join(
spack.paths.prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('$spack/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('/foo/bar/baz/$spack/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix
) == spack_path.canonicalize_path('/foo/bar/baz/${spack}')
assert os.path.join(
spack.paths.prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('${spack}/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix, 'foo/bar/baz'
) == spack_path.canonicalize_path('/foo/bar/baz/${spack}/foo/bar/baz/')
assert os.path.join(
'/foo/bar/baz', prefix, 'foo/bar/baz'
) != spack_path.canonicalize_path('/foo/bar/baz/${spack/foo/bar/baz/')
packages_merge_low = {
'packages': {
'foo': {
'variants': ['+v1']
},
'bar': {
'variants': ['+v2']
}
}
}
packages_merge_high = {
'packages': {
'foo': {
'version': ['a']
},
'bar': {
'version': ['b'],
'variants': ['+v3']
},
'baz': {
'version': ['c']
}
}
}
@pytest.mark.regression('7924')
def test_merge_with_defaults(mock_low_high_config, write_config_file):
"""This ensures that specified preferences merge with defaults as
expected. Originally all defaults were initialized with the
exact same object, which led to aliasing problems. Therefore
the test configs used here leave 'version' blank for multiple
packages in 'packages_merge_low'.
"""
write_config_file('packages', packages_merge_low, 'low')
write_config_file('packages', packages_merge_high, 'high')
cfg = spack.config.get('packages')
assert cfg['foo']['version'] == ['a']
assert cfg['bar']['version'] == ['b']
assert cfg['baz']['version'] == ['c']
def test_substitute_user(mock_low_high_config):
user = getpass.getuser()
assert '/foo/bar/' + user + '/baz' == spack_path.canonicalize_path(
'/foo/bar/$user/baz'
)
def test_substitute_tempdir(mock_low_high_config):
tempdir = tempfile.gettempdir()
assert tempdir == spack_path.canonicalize_path('$tempdir')
assert tempdir + '/foo/bar/baz' == spack_path.canonicalize_path(
'$tempdir/foo/bar/baz'
)
PAD_STRING = spack.util.path.SPACK_PATH_PADDING_CHARS
MAX_PATH_LEN = spack.util.path.get_system_path_max()
MAX_PADDED_LEN = MAX_PATH_LEN - spack.util.path.SPACK_MAX_INSTALL_PATH_LENGTH
reps = [PAD_STRING for _ in range((MAX_PADDED_LEN // len(PAD_STRING) + 1) + 2)]
full_padded_string = os.path.join(
'/path', os.path.sep.join(reps))[:MAX_PADDED_LEN]
@pytest.mark.parametrize('config_settings,expected', [
([], [None, None, None]),
([['config:install_tree:root', '/path']], ['/path', None, None]),
([['config:install_tree', '/path']], ['/path', None, None]),
([['config:install_tree:projections', {'all': '{name}'}]],
[None, None, {'all': '{name}'}]),
([['config:install_path_scheme', '{name}']],
[None, None, {'all': '{name}'}]),
([['config:install_tree:root', '/path'],
['config:install_tree:padded_length', 11]],
[os.path.join('/path', PAD_STRING[:5]), '/path', None]),
([['config:install_tree:root', '/path/$padding:11']],
[os.path.join('/path', PAD_STRING[:5]), '/path', None]),
([['config:install_tree', '/path/${padding:11}']],
[os.path.join('/path', PAD_STRING[:5]), '/path', None]),
([['config:install_tree:padded_length', False]], [None, None, None]),
([['config:install_tree:padded_length', True],
['config:install_tree:root', '/path']],
[full_padded_string, '/path', None]),
([['config:install_tree:', '/path$padding']],
[full_padded_string, '/path', None]),
([['config:install_tree:', '/path/${padding}']],
[full_padded_string, '/path', None]),
])
def test_parse_install_tree(config_settings, expected, mutable_config):
expected_root = expected[0] or spack.store.default_install_tree_root
expected_unpadded_root = expected[1] or expected_root
expected_proj = expected[2] or spack.directory_layout.default_projections
# config settings is a list of 2-element lists, [path, value]
# where path is a config path and value is the value to set at that path
# these can be "splatted" in as the arguments to config.set
for config_setting in config_settings:
mutable_config.set(*config_setting)
config_dict = mutable_config.get('config')
root, unpadded_root, projections = spack.store.parse_install_tree(
config_dict)
assert root == expected_root
assert unpadded_root == expected_unpadded_root
assert projections == expected_proj
def test_read_config(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
assert spack.config.get('config') == config_low['config']
def test_read_config_override_all(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_all, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'override_all'
}
}
def test_read_config_override_key(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_key, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'override_key'
},
'build_stage': ['path1', 'path2', 'path3']
}
def test_read_config_merge_list(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_merge_list, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'install_tree_path'
},
'build_stage': ['patha', 'pathb', 'path1', 'path2', 'path3']
}
def test_read_config_override_list(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_list, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'install_tree_path'
},
'build_stage': config_override_list['config']['build_stage:']
}
def test_ordereddict_merge_order():
""""Test that source keys come before dest keys in merge_yaml results."""
source = syaml.syaml_dict([
("k1", "v1"),
("k2", "v2"),
("k3", "v3"),
])
dest = syaml.syaml_dict([
("k4", "v4"),
("k3", "WRONG"),
("k5", "v5"),
])
result = spack.config.merge_yaml(dest, source)
assert "WRONG" not in result.values()
expected_keys = ["k1", "k2", "k3", "k4", "k5"]
expected_items = [
("k1", "v1"), ("k2", "v2"), ("k3", "v3"), ("k4", "v4"), ("k5", "v5")
]
assert expected_keys == list(result.keys())
assert expected_items == list(result.items())
def test_list_merge_order():
""""Test that source lists are prepended to dest."""
source = ["a", "b", "c"]
dest = ["d", "e", "f"]
result = spack.config.merge_yaml(dest, source)
assert ["a", "b", "c", "d", "e", "f"] == result
def test_internal_config_update(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
before = mock_low_high_config.get('config')
assert before['install_tree']['root'] == 'install_tree_path'
# add an internal configuration scope
scope = spack.config.InternalConfigScope('command_line')
assert 'InternalConfigScope' in repr(scope)
mock_low_high_config.push_scope(scope)
command_config = mock_low_high_config.get('config', scope='command_line')
command_config['install_tree'] = {'root': 'foo/bar'}
mock_low_high_config.set('config', command_config, scope='command_line')
after = mock_low_high_config.get('config')
assert after['install_tree']['root'] == 'foo/bar'
def test_internal_config_filename(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
mock_low_high_config.push_scope(
spack.config.InternalConfigScope('command_line'))
with pytest.raises(NotImplementedError):
mock_low_high_config.get_config_filename('command_line', 'config')
def test_mark_internal():
data = {
'config': {
'bool': False,
'int': 6,
'numbers': [1, 2, 3],
'string': 'foo',
'dict': {
'more_numbers': [1, 2, 3],
'another_string': 'foo',
'another_int': 7,
}
}
}
marked = spack.config._mark_internal(data, 'x')
# marked version should be equal to the original
assert data == marked
def assert_marked(obj):
if type(obj) is bool:
return # can't subclass bool, so can't mark it
assert hasattr(obj, '_start_mark') and obj._start_mark.name == 'x'
assert hasattr(obj, '_end_mark') and obj._end_mark.name == 'x'
# everything in the marked version should have marks
checks = (marked.keys(), marked.values(),
marked['config'].keys(), marked['config'].values(),
marked['config']['numbers'],
marked['config']['dict'].keys(),
marked['config']['dict'].values(),
marked['config']['dict']['more_numbers'])
for seq in checks:
for obj in seq:
assert_marked(obj)
def test_internal_config_from_data():
config = spack.config.Configuration()
# add an internal config initialized from an inline dict
config.push_scope(spack.config.InternalConfigScope('_builtin', {
'config': {
'verify_ssl': False,
'build_jobs': 6,
}
}))
assert config.get('config:verify_ssl', scope='_builtin') is False
assert config.get('config:build_jobs', scope='_builtin') == 6
assert config.get('config:verify_ssl') is False
assert config.get('config:build_jobs') == 6
# push one on top and see what happens.
config.push_scope(spack.config.InternalConfigScope('higher', {
'config': {
'checksum': True,
'verify_ssl': True,
}
}))
assert config.get('config:verify_ssl', scope='_builtin') is False
assert config.get('config:build_jobs', scope='_builtin') == 6
assert config.get('config:verify_ssl', scope='higher') is True
assert config.get('config:build_jobs', scope='higher') is None
assert config.get('config:verify_ssl') is True
assert config.get('config:build_jobs') == 6
assert config.get('config:checksum') is True
assert config.get('config:checksum', scope='_builtin') is None
assert config.get('config:checksum', scope='higher') is True
def test_keys_are_ordered():
"""Test that keys in Spack YAML files retain their order from the file."""
expected_order = (
'bin',
'man',
'share/man',
'share/aclocal',
'lib',
'lib64',
'include',
'lib/pkgconfig',
'lib64/pkgconfig',
'share/pkgconfig',
''
)
config_scope = spack.config.ConfigScope(
'modules',
os.path.join(spack.paths.test_path, 'data', 'config')
)
data = config_scope.get_section('modules')
prefix_inspections = data['modules']['prefix_inspections']
for actual, expected in zip(prefix_inspections, expected_order):
assert actual == expected
def test_config_format_error(mutable_config):
"""This is raised when we try to write a bad configuration."""
with pytest.raises(spack.config.ConfigFormatError):
spack.config.set('compilers', {'bad': 'data'}, scope='site')
def get_config_error(filename, schema, yaml_string):
"""Parse a YAML string and return the resulting ConfigFormatError.
Fail if there is no ConfigFormatError
"""
with open(filename, 'w') as f:
f.write(yaml_string)
# parse and return error, or fail.
try:
spack.config.read_config_file(filename, schema)
except spack.config.ConfigFormatError as e:
return e
else:
pytest.fail('ConfigFormatError was not raised!')
def test_config_parse_dict_in_list(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'repos.yaml', spack.schema.repos.schema, """\
repos:
- https://foobar.com/foo
- https://foobar.com/bar
- error:
- abcdef
- https://foobar.com/baz
""")
assert "repos.yaml:4" in str(e)
def test_config_parse_str_not_bool(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'config.yaml', spack.schema.config.schema, """\
config:
verify_ssl: False
checksum: foobar
dirty: True
""")
assert "config.yaml:3" in str(e)
def test_config_parse_list_in_dict(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'mirrors.yaml', spack.schema.mirrors.schema, """\
mirrors:
foo: http://foobar.com/baz
bar: http://barbaz.com/foo
baz: http://bazfoo.com/bar
travis: [1, 2, 3]
""")
assert "mirrors.yaml:5" in str(e)
def test_bad_config_section(mock_low_high_config):
"""Test that getting or setting a bad section gives an error."""
with pytest.raises(spack.config.ConfigSectionError):
spack.config.set('foobar', 'foobar')
with pytest.raises(spack.config.ConfigSectionError):
spack.config.get('foobar')
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_bad_command_line_scopes(tmpdir, mock_low_high_config):
cfg = spack.config.Configuration()
with tmpdir.as_cwd():
with pytest.raises(spack.config.ConfigError):
spack.config._add_command_line_scopes(cfg, ['bad_path'])
touch('unreadable_file')
with pytest.raises(spack.config.ConfigError):
spack.config._add_command_line_scopes(cfg, ['unreadable_file'])
mkdirp('unreadable_dir')
with pytest.raises(spack.config.ConfigError):
try:
os.chmod('unreadable_dir', 0)
spack.config._add_command_line_scopes(cfg, ['unreadable_dir'])
finally:
os.chmod('unreadable_dir', 0o700) # so tmpdir can be removed
def test_add_command_line_scopes(tmpdir, mutable_config):
config_yaml = str(tmpdir.join('config.yaml'))
with open(config_yaml, 'w') as f:
f.write("""\
config:
verify_ssl: False
dirty: False
""")
spack.config._add_command_line_scopes(mutable_config, [str(tmpdir)])
def test_nested_override():
"""Ensure proper scope naming of nested overrides."""
base_name = spack.config.overrides_base_name
def _check_scopes(num_expected, debug_values):
scope_names = [s.name for s in spack.config.config.scopes.values() if
s.name.startswith(base_name)]
for i in range(num_expected):
name = '{0}{1}'.format(base_name, i)
assert name in scope_names
data = spack.config.config.get_config('config', name)
assert data['debug'] == debug_values[i]
# Check results from single and nested override
with spack.config.override('config:debug', True):
with spack.config.override('config:debug', False):
_check_scopes(2, [True, False])
_check_scopes(1, [True])
def test_alternate_override(monkeypatch):
"""Ensure proper scope naming of override when conflict present."""
base_name = spack.config.overrides_base_name
def _matching_scopes(regexpr):
return [spack.config.InternalConfigScope('{0}1'.format(base_name))]
# Check that the alternate naming works
monkeypatch.setattr(spack.config.config, 'matching_scopes',
_matching_scopes)
with spack.config.override('config:debug', False):
name = '{0}2'.format(base_name)
scope_names = [s.name for s in spack.config.config.scopes.values() if
s.name.startswith(base_name)]
assert name in scope_names
data = spack.config.config.get_config('config', name)
assert data['debug'] is False
def test_immutable_scope(tmpdir):
config_yaml = str(tmpdir.join('config.yaml'))
with open(config_yaml, 'w') as f:
f.write("""\
config:
install_tree:
root: dummy_tree_value
""")
scope = spack.config.ImmutableConfigScope('test', str(tmpdir))
data = scope.get_section('config')
assert data['config']['install_tree'] == {'root': 'dummy_tree_value'}
with pytest.raises(spack.config.ConfigError):
scope._write_section('config')
def test_single_file_scope(tmpdir, config):
env_yaml = str(tmpdir.join("env.yaml"))
with open(env_yaml, 'w') as f:
f.write("""\
env:
config:
verify_ssl: False
dirty: False
packages:
libelf:
compiler: [ '[email protected]' ]
repos:
- /x/y/z
""")
scope = spack.config.SingleFileScope(
'env', env_yaml, spack.schema.env.schema, ['env'])
with spack.config.override(scope):
# from the single-file config
assert spack.config.get('config:verify_ssl') is False
assert spack.config.get('config:dirty') is False
assert spack.config.get('packages:libelf:compiler') == ['[email protected]']
# from the lower config scopes
assert spack.config.get('config:checksum') is True
assert spack.config.get('config:checksum') is True
assert spack.config.get('packages:externalmodule:buildable') is False
assert spack.config.get('repos') == [
'/x/y/z', '$spack/var/spack/repos/builtin']
def test_single_file_scope_section_override(tmpdir, config):
"""Check that individual config sections can be overridden in an
environment config. The config here primarily differs in that the
``packages`` section is intended to override all other scopes (using the
"::" syntax).
"""
env_yaml = str(tmpdir.join("env.yaml"))
with open(env_yaml, 'w') as f:
f.write("""\
env:
config:
verify_ssl: False
packages::
libelf:
compiler: [ '[email protected]' ]
repos:
- /x/y/z
""")
scope = spack.config.SingleFileScope(
'env', env_yaml, spack.schema.env.schema, ['env'])
with spack.config.override(scope):
# from the single-file config
assert spack.config.get('config:verify_ssl') is False
assert spack.config.get('packages:libelf:compiler') == ['[email protected]']
# from the lower config scopes
assert spack.config.get('config:checksum') is True
assert not spack.config.get('packages:externalmodule')
assert spack.config.get('repos') == [
'/x/y/z', '$spack/var/spack/repos/builtin']
def test_write_empty_single_file_scope(tmpdir):
env_schema = spack.schema.env.schema
scope = spack.config.SingleFileScope(
'test', str(tmpdir.ensure('config.yaml')), env_schema, ['spack'])
scope._write_section('config')
# confirm we can write empty config
assert not scope.get_section('config')
def check_schema(name, file_contents):
"""Check a Spack YAML schema against some data"""
f = StringIO(file_contents)
data = syaml.load_config(f)
spack.config.validate(data, name)
def test_good_env_yaml(tmpdir):
check_schema(spack.schema.env.schema, """\
spack:
config:
verify_ssl: False
dirty: False
repos:
- ~/my/repo/location
mirrors:
remote: /foo/bar/baz
compilers:
- compiler:
spec: [email protected]
operating_system: cnl
modules: []
paths:
cc: /path/to/cc
cxx: /path/to/cxx
fc: /path/to/fc
f77: /path/to/f77
""")
def test_bad_env_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.env.schema, """\
env:
foobar:
verify_ssl: False
dirty: False
""")
def test_bad_config_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.config.schema, """\
config:
verify_ssl: False
module_roots:
fmod: /some/fake/location
""")
def test_bad_mirrors_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.mirrors.schema, """\
mirrors:
local: True
""")
def test_bad_repos_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.repos.schema, """\
repos:
True
""")
def test_bad_compilers_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
key_instead_of_list: 'value'
""")
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
- shmompiler:
environment: /bad/value
""")
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
- compiler:
fenfironfent: /bad/value
""")
@pytest.mark.regression('13045')
def test_dotkit_in_config_does_not_raise(
mock_low_high_config, write_config_file, capsys
):
write_config_file('config',
{'config': {'module_roots': {'dotkit': '/some/path'}}},
'high')
spack.main.print_setup_info('sh')
captured = capsys.readouterr()
# Check that we set the variables we expect and that
# we throw a a deprecation warning without raising
assert '_sp_sys_type' in captured[0] # stdout
assert 'Warning' in captured[1] # stderr
def test_internal_config_section_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', {
'config:': {
'build_stage': wanted_list
}
}))
assert mock_low_high_config.get('config:build_stage') == wanted_list
def test_internal_config_dict_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_dict, 'low')
wanted_dict = config_override_dict['config']['info:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', config_override_dict))
assert mock_low_high_config.get('config:info') == wanted_dict
def test_internal_config_list_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', config_override_list))
assert mock_low_high_config.get('config:build_stage') == wanted_list
def test_set_section_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
with spack.config.override('config::build_stage', wanted_list):
assert mock_low_high_config.get('config:build_stage') == wanted_list
assert config_merge_list['config']['build_stage'] == \
mock_low_high_config.get('config:build_stage')
def test_set_list_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
with spack.config.override('config:build_stage:', wanted_list):
assert wanted_list == mock_low_high_config.get('config:build_stage')
assert config_merge_list['config']['build_stage'] == \
mock_low_high_config.get('config:build_stage')
def test_set_dict_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_dict, 'low')
wanted_dict = config_override_dict['config']['info:']
with spack.config.override('config:info:', wanted_dict):
assert wanted_dict == mock_low_high_config.get('config:info')
assert config_merge_dict['config']['info'] == \
mock_low_high_config.get('config:info')
def test_set_bad_path(config):
with pytest.raises(syaml.SpackYAMLError, match='Illegal leading'):
with spack.config.override(':bad:path', ''):
pass
def test_bad_path_double_override(config):
with pytest.raises(syaml.SpackYAMLError,
match='Meaningless second override'):
with spack.config.override('bad::double:override::directive', ''):
pass
| lgpl-2.1 | -8,205,310,399,040,983,000 | 30.787966 | 79 | 0.584881 | false |
tkzeng/molecular-design-toolkit | moldesign/geom/monitor.py | 1 | 3798 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import moldesign as mdt
from . import toplevel
from . import constraints, grads, coords, setcoord
class Monitor(object):
def __init__(self, *atoms):
if len(atoms) != self.NUM_ATOMS:
raise ValueError('%s requires %d atoms, but %d passed' %
(type(self), self.NUM_ATOMS, len(atoms)))
self.atoms = atoms
@property
def value(self):
return self.GETTER(*self.atoms)
@value.setter
def value(self, val):
args = self.atoms + (val,)
self.SETTER(*args)
def gradient(self):
return grads._atom_grad_to_mol_grad(self.atoms, self.GRAD(*self.atoms))
@mdt.utils.kwargs_from(constraints.GeometryConstraint)
def constrain(self, **kwargs):
""" Constrain this coordinate.
This will add a new item to the parent molecule's constraint list.
Args:
**kwargs (dict): kwargs for constraints.GeometryConstraint
Returns:
constraints.GeometryConstraint: the constraint object
"""
c = self.CONSTRAINT(*self.atoms, **kwargs)
mol = self.atoms[0].molecule
        for atom in self.atoms[1:]:
if atom.molecule is not mol:
raise ValueError("Can't create constraint; atoms are not part of the same Molecule")
mol.constraints.append(c)
mol._reset_methods()
return c
def __call__(self, obj):
""" Calculate this value for the given trajectory
Args:
obj (mdt.Molecule or mdt.Trajectory): molecule or trajectory to measure
Returns:
moldesign.units.Quantity: this coordinate's value (for a molecule), or a list of values
(for a trajectory)
Note:
Atoms are identified by their index only; the atoms defined in the Monitor must have
the same indices as those in the passed object
"""
return self.GETTER(*(obj.atoms[a.index] for a in self.atoms))
def __str__(self):
return '%s: %s' % (type(self).__name__, self.value)
def __repr__(self):
return '<%s for atoms %s: %s>' % (type(self).__name__,
','.join(str(atom.index) for atom in self.atoms),
self.value)
@toplevel
class DistanceMonitor(Monitor):
NUM_ATOMS = 2
GETTER = staticmethod(coords.distance)
SETTER = staticmethod(setcoord.set_distance)
GRAD = staticmethod(grads.distance_gradient)
CONSTRAINT = constraints.DistanceConstraint
@toplevel
class AngleMonitor(Monitor):
NUM_ATOMS = 3
GETTER = staticmethod(coords.angle)
SETTER = staticmethod(setcoord.set_angle)
GRAD = staticmethod(grads.angle_gradient)
CONSTRAINT = constraints.AngleConstraint
@toplevel
class DihedralMonitor(Monitor):
def __init__(self, *atoms):
if len(atoms) in (1, 2):
atoms = coords._infer_dihedral(*atoms)
super(DihedralMonitor, self).__init__(*atoms)
NUM_ATOMS = 4
GETTER = staticmethod(coords.dihedral)
SETTER = staticmethod(setcoord.set_dihedral)
GRAD = staticmethod(grads.dihedral_gradient)
CONSTRAINT = constraints.DihedralConstraint
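# Illustrative usage sketch (added for clarity, not part of the original
# module). ``mol`` is assumed to be a moldesign Molecule with at least two
# atoms, and ``traj`` an optional Trajectory over the same molecule.
def _example_monitor_usage(mol, traj=None):
    distance = DistanceMonitor(mol.atoms[0], mol.atoms[1])
    current = distance.value      # units-aware distance between the two atoms
    distance.constrain()          # appends a DistanceConstraint to mol.constraints
    if traj is not None:
        return distance(traj)     # one value per frame, via Monitor.__call__
    return current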
| apache-2.0 | -4,536,990,009,628,288,500 | 32.026087 | 100 | 0.633491 | false |
INM-6/Python-Module-of-the-Week | session01_Decorators/test_printtime_cm.py | 1 | 1210 | #!/usr/bin/env python3
import time, re, io, sys
import contextlib
def test_we_can_import_module():
import printtime_cm
def test_context_manager_exists():
import printtime_cm
printtime_cm.printtime_cm
def test_context_manager_can_be_used():
import printtime_cm
with printtime_cm.printtime_cm():
pass
def test_sleep_1():
import printtime_cm
tmp = io.StringIO()
with contextlib.redirect_stdout(tmp):
with printtime_cm.printtime_cm():
time.sleep(1)
out = tmp.getvalue()
    assert re.match(r'calculations took 1\..*s', out, re.IGNORECASE)
def test_sleep_nested():
import printtime_cm
tmp = io.StringIO()
tmp2 = io.StringIO()
with contextlib.redirect_stdout(tmp):
with printtime_cm.printtime_cm():
with contextlib.redirect_stdout(tmp2):
with printtime_cm.printtime_cm():
time.sleep(1)
time.sleep(1)
out = tmp.getvalue()
    out2 = tmp2.getvalue()
    assert re.match(r'calculations took 2\..*s', out, re.IGNORECASE)
    assert re.match(r'calculations took 1\..*s', out2, re.IGNORECASE)
if __name__ == '__main__':
import pytest
pytest.main([__file__] + sys.argv[1:])
| mit | 7,859,297,349,880,794,000 | 24.208333 | 62 | 0.620661 | false |
mozilla/peekaboo | peekaboo/main/tests/test_views.py | 1 | 6973 | # -*- coding: utf-8 -*-
import os
import datetime
import json
from nose.tools import eq_, ok_
from django.test import TestCase, Client
from django.conf import settings
from django.contrib.auth.models import User
from funfactory.urlresolvers import reverse, split_path
from peekaboo.main.models import Location, Visitor
class LocalizingClient(Client):
"""Client which prepends a locale so test requests can get through
LocaleURLMiddleware without resulting in a locale-prefix-adding 301.
Otherwise, we'd have to hard-code locales into our tests everywhere or
{mock out reverse() and make LocaleURLMiddleware not fire}.
"""
def request(self, **request):
"""Make a request, but prepend a locale if there isn't one already."""
# Fall back to defaults as in the superclass's implementation:
path = request.get('PATH_INFO', self.defaults.get('PATH_INFO', '/'))
locale, shortened = split_path(path)
if not locale:
request['PATH_INFO'] = '/%s/%s' % (settings.LANGUAGE_CODE,
shortened)
return super(LocalizingClient, self).request(**request)
class BaseTestCase(TestCase):
client_class = LocalizingClient
def _login(self, is_staff=True, is_superuser=False):
user, __ = User.objects.get_or_create(
username='shannon',
email='[email protected]',
)
if is_superuser:
is_staff = True
user.is_staff = is_staff
user.is_superuser = is_superuser
user.set_password('secret')
user.save()
assert self.client.login(username='shannon', password='secret')
return user
class TestViews(BaseTestCase):
def test_contribute_json(self):
response = self.client.get('/contribute.json')
eq_(response.status_code, 200)
# Should be valid JSON, but it's a streaming content because
# it comes from django.views.static.serve
ok_(json.loads(''.join(response.streaming_content)))
eq_(response['Content-Type'], 'application/json')
def test_log_entries(self):
location = Location.objects.create(
name='Mountain View',
slug='mv',
timezone='US/Pacific',
)
url = reverse('main:log_entries', args=('mv',))
response = self.client.get(url)
eq_(response.status_code, 302)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['created'], [])
eq_(data['latest'], None)
# add an entry
visitor1 = Visitor.objects.create(
location=location,
first_name='Bill',
last_name='Gates',
job_title='Boss',
)
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['created']), 1)
eq_(data['created'][0]['name'], 'Bill Gates')
eq_(data['created'][0]['job_title'], 'Boss')
eq_(data['created'][0]['id'], visitor1.pk)
ok_(isinstance(data['latest'], int))
        # this number should be a Unix timestamp (seconds since the epoch, UTC)
latest_timestamp = data['latest']
latest = datetime.datetime.utcfromtimestamp(latest_timestamp)
# this won't contain a timezone but the hour and minute should
# be the same as the `visitor1`
eq_(
visitor1.created.strftime('%H:%M'),
latest.strftime('%H:%M')
)
# include this and nothing new should come
response = self.client.get(url, {
'latest': str(latest_timestamp),
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['created'], [])
eq_(data['modified'], [])
eq_(data['latest'], None)
# let's add another, newer
visitor2 = Visitor.objects.create(
location=location,
first_name='Paul',
last_name='Allen',
)
visitor2.created += datetime.timedelta(seconds=1)
visitor2.save()
response = self.client.get(url, {
'latest': str(latest_timestamp),
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['created']), 1)
eq_(data['created'][0]['name'], 'Paul Allen')
eq_(data['created'][0]['id'], visitor2.pk)
new_latest_timestamp = data['latest']
        # the new latest timestamp should be exactly one second after the
        # previous one, since visitor2 was created one second later
eq_(latest_timestamp + 1, new_latest_timestamp)
# ask one more time and nothing new should come back
previous_latest = data['latest']
response = self.client.get(url, {
'latest': previous_latest,
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['created']), 0)
eq_(len(data['modified']), 0)
# let's modify the first visitor
        visitor1.job_title = 'Philanthropist'
visitor1.modified += datetime.timedelta(seconds=10)
visitor1.save()
response = self.client.get(url, {
'latest': previous_latest,
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(len(data['modified']), 1)
previous_latest_timestamp = new_latest_timestamp
new_latest_timestamp = data['latest']
eq_(
previous_latest_timestamp + 10 - 1,
new_latest_timestamp
)
response = self.client.get(url, {
'latest': str(new_latest_timestamp),
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['created'], [])
eq_(data['modified'], [])
eq_(data['latest'], None)
def test_eventbrite_upload(self):
url = reverse('main:csv_upload')
response = self.client.get(url)
eq_(response.status_code, 302)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
location = Location.objects.create(
name='Berlin',
slug='berlin',
timezone='Europe/Berlin',
)
_here = os.path.dirname(__file__)
response = self.client.post(url, {
'file': open(os.path.join(_here, 'sample-eventbrite.csv')),
'format': 'eventbrite',
'location': location.id,
'date': '2015-06-16 13:00:00', # Europe summer time, is +2h
})
visitors = Visitor.objects.filter(location=location)
first_names = [x.first_name for x in visitors.order_by('first_name')]
eq_(first_names, [u'Nicolai Froehlich', u'Södan'])
first_created = [x.created for x in visitors][0]
eq_(first_created.strftime('%H:%M %Z'), '11:00 UTC')
| mpl-2.0 | 3,321,931,372,646,653,000 | 32.681159 | 78 | 0.576736 | false |
pombredanne/datanommer | datanommer.commands/setup.py | 1 | 1930 | # This file is a part of datanommer, a message sink for fedmsg.
# Copyright (C) 2014, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
import sys
f = open('README.rst')
long_description = f.read().strip()
long_description = long_description.split('split here', 1)[1]
f.close()
version = '0.4.6'
setup(
name='datanommer.commands',
version=version,
    description="Console commands for datanommer",
long_description=long_description,
author='Ralph Bean',
author_email='[email protected]',
url='http://github.com/fedora-infra/datanommer',
license='GPLv3+',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['datanommer'],
include_package_data=True,
zip_safe=False,
install_requires=[
"datanommer.models",
"fedmsg",
],
entry_points={
'console_scripts': (
'datanommer-create-db=datanommer.commands:create',
'datanommer-dump=datanommer.commands:dump',
'datanommer-stats=datanommer.commands:stats',
'datanommer-latest=datanommer.commands:latest',
),
},
tests_require=[
"nose",
"mock",
"fedmsg_meta_fedora_infrastructure",
"freezegun",
],
test_suite='nose.collector',
)
| gpl-3.0 | -8,626,820,665,828,198,000 | 32.275862 | 79 | 0.678238 | false |
tylertian/Openstack | openstack F/python-novaclient/novaclient/tests/test_http.py | 1 | 3725 | import mock
import requests
from novaclient import client
from novaclient import exceptions
from novaclient.tests import utils
fake_response = utils.TestResponse({
"status_code": 200,
"text": '{"hi": "there"}',
})
mock_request = mock.Mock(return_value=(fake_response))
refused_response = utils.TestResponse({
"status_code": 400,
"text": '[Errno 111] Connection refused',
})
refused_mock_request = mock.Mock(return_value=(refused_response))
bad_req_response = utils.TestResponse({
"status_code": 400,
"text": '',
})
bad_req_mock_request = mock.Mock(return_value=(bad_req_response))
def get_client():
cl = client.HTTPClient("username", "password",
"project_id", "auth_test")
return cl
def get_authed_client():
cl = get_client()
cl.management_url = "http://example.com"
cl.auth_token = "token"
return cl
class ClientTest(utils.TestCase):
def test_get(self):
cl = get_authed_client()
@mock.patch.object(requests.Session, "request", mock_request)
@mock.patch('time.time', mock.Mock(return_value=1234))
def test_get_call():
resp, body = cl.get("/hi")
headers = {"X-Auth-Token": "token",
"X-Auth-Project-Id": "project_id",
"User-Agent": cl.USER_AGENT,
'Accept': 'application/json',
}
mock_request.assert_called_with(
"GET",
"http://example.com/hi",
headers=headers,
**self.TEST_REQUEST_BASE)
# Automatic JSON parsing
self.assertEqual(body, {"hi": "there"})
test_get_call()
def test_post(self):
cl = get_authed_client()
@mock.patch.object(requests.Session, "request", mock_request)
def test_post_call():
cl.post("/hi", body=[1, 2, 3])
headers = {
"X-Auth-Token": "token",
"X-Auth-Project-Id": "project_id",
"Content-Type": "application/json",
'Accept': 'application/json',
"User-Agent": cl.USER_AGENT
}
mock_request.assert_called_with(
"POST",
"http://example.com/hi",
headers=headers,
data='[1, 2, 3]',
**self.TEST_REQUEST_BASE)
test_post_call()
def test_auth_failure(self):
cl = get_client()
# response must not have x-server-management-url header
@mock.patch.object(requests.Session, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.AuthorizationFailure, cl.authenticate)
test_auth_call()
def test_connection_refused(self):
cl = get_client()
@mock.patch.object(requests.Session, "request", refused_mock_request)
def test_refused_call():
self.assertRaises(exceptions.ConnectionRefused, cl.get, "/hi")
test_refused_call()
def test_bad_request(self):
cl = get_client()
@mock.patch.object(requests.Session, "request", bad_req_mock_request)
def test_refused_call():
self.assertRaises(exceptions.BadRequest, cl.get, "/hi")
test_refused_call()
def test_client_logger(self):
cl1 = client.HTTPClient("username", "password", "project_id",
"auth_test", http_log_debug=True)
self.assertEquals(len(cl1._logger.handlers), 1)
cl2 = client.HTTPClient("username", "password", "project_id",
"auth_test", http_log_debug=True)
self.assertEquals(len(cl2._logger.handlers), 1)
| apache-2.0 | -3,069,217,448,494,700,000 | 29.532787 | 79 | 0.56 | false |
dkdfirefly/speaker_project | code/separateLeadStereo/separateLeadStereoParam.py | 1 | 41756 | #!/usr/bin/python
# copyright (C) 2011 Jean-Louis Durrieu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import SIMM
#import scikits.audiolab
import scipy
#if np.double(scipy.__version__[:3]) < 0.8:
# raise ImportError('Version of scipy is %s, to read wavfile, one needs >= 0.8' %(scipy.__version__))
import scipy.io.wavfile as wav
import os
import sys
from tracking import viterbiTrackingArray
# SOME USEFUL, INSTRUMENTAL, FUNCTIONS
def db(val):
"""
db(positiveValue)
Returns the decibel value of the input positiveValue
"""
return 10 * np.log10(val)
def ISDistortion(X,Y):
"""
value = ISDistortion(X, Y)
Returns the value of the Itakura-Saito (IS) divergence between
matrix X and matrix Y. X and Y should be two NumPy arrays with
same dimension.
"""
    return np.sum(-np.log(X / Y) + (X / Y) - 1)
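# Illustrative sketch (added for clarity, not part of the original script):
# the IS divergence is zero for identical spectra and strictly positive
# otherwise. The toy arrays below are demo-only assumptions.
def _demo_ISDistortion():
    X = np.array([1.0, 2.0, 4.0])
    assert np.isclose(ISDistortion(X, X), 0.0)   # identical spectra -> 0
    Y = np.array([2.0, 1.0, 4.0])
    assert ISDistortion(X, Y) > 0.0              # mismatch -> positive value
    return ISDistortion(X, Y)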
# DEFINING SOME WINDOW FUNCTIONS
def sinebell(lengthWindow):
"""
window = sinebell(lengthWindow)
Computes a "sinebell" window function of length L=lengthWindow
The formula is:
window(t) = sin(pi * t / L), t = 0..L-1
"""
window = np.sin((np.pi * (np.arange(lengthWindow))) \
/ (1.0 * lengthWindow))
return window
def hann(args):
"""
window = hann(args)
Computes a Hann window, with NumPy's function hanning(args).
"""
return np.hanning(args)
# FUNCTIONS FOR TIME-FREQUENCY REPRESENTATION
def stft(data, window=sinebell(2048), hopsize=256.0, nfft=2048.0, \
fs=44100.0):
"""
    X, F, N = stft(data, window=sinebell(2048), hopsize=256.0,
nfft=2048.0, fs=44100)
Computes the short time Fourier transform (STFT) of data.
Inputs:
data : one-dimensional time-series to be
analyzed
window=sinebell(2048) : analysis window
        hopsize=256.0              : hopsize for the analysis
nfft=2048.0 : number of points for the Fourier
computation (the user has to provide an
even number)
fs=44100.0 : sampling rate of the signal
Outputs:
X : STFT of data
F : values of frequencies at each Fourier
bins
N : central time at the middle of each
analysis window
"""
# window defines the size of the analysis windows
lengthWindow = window.size
# !!! adding zeros to the beginning of data, such that the first
# window is centered on the first sample of data
data = np.concatenate((np.zeros(lengthWindow / 2.0),data))
lengthData = data.size
# adding one window for the last frame (same reason as for the
# first frame)
numberFrames = np.ceil((lengthData - lengthWindow) / hopsize \
+ 1) + 1
newLengthData = (numberFrames - 1) * hopsize + lengthWindow
# zero-padding data such that it holds an exact number of frames
data = np.concatenate((data, np.zeros([newLengthData - lengthData])))
# the output STFT has nfft/2+1 rows. Note that nfft has to be an
# even number (and a power of 2 for the fft to be fast)
numberFrequencies = nfft / 2.0 + 1
STFT = np.zeros([numberFrequencies, numberFrames], dtype=complex)
for n in np.arange(numberFrames):
beginFrame = n * hopsize
endFrame = beginFrame + lengthWindow
frameToProcess = window * data[beginFrame:endFrame]
STFT[:,n] = np.fft.rfft(frameToProcess, nfft);
F = np.arange(numberFrequencies) / nfft * fs
N = np.arange(numberFrames) * hopsize / fs
return STFT, F, N
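# Illustrative usage sketch (not part of the original script): analysing a
# synthetic 440 Hz tone with the same window/hop settings used in main().
# The test signal and its length are demo-only assumptions.
def _demo_stft():
    fs = 44100.0
    t = np.arange(int(fs)) / fs                  # one second of samples
    data = np.sin(2 * np.pi * 440.0 * t)         # 440 Hz test tone
    X, F, N = stft(data, window=sinebell(2048), hopsize=256.0,
                   nfft=2048.0, fs=fs)
    # X has nfft/2+1 rows (frequency bins) and one column per analysis frame;
    # F holds the bin frequencies in Hz, N the frame centre times in seconds.
    return X, F, N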
def istft(X, window=sinebell(2048), hopsize=256.0, nfft=2048.0):
"""
data = istft(X, window=sinebell(2048), hopsize=256.0, nfft=2048.0)
Computes an inverse of the short time Fourier transform (STFT),
here, the overlap-add procedure is implemented.
Inputs:
X : STFT of the signal, to be "inverted"
window=sinebell(2048) : synthesis window
(should be the "complementary" window
for the analysis window)
        hopsize=256.0            : hopsize for the synthesis (overlap-add)
nfft=2048.0 : number of points for the Fourier
computation
(the user has to provide an even number)
Outputs:
data : time series corresponding to the given
STFT the first half-window is removed,
complying with the STFT computation
given in the function 'stft'
"""
lengthWindow = np.array(window.size)
numberFrequencies, numberFrames = np.array(X.shape)
lengthData = hopsize * (numberFrames - 1) + lengthWindow
data = np.zeros(lengthData)
for n in np.arange(numberFrames):
beginFrame = n * hopsize
endFrame = beginFrame + lengthWindow
frameTMP = np.fft.irfft(X[:,n], nfft)
frameTMP = frameTMP[:lengthWindow]
data[beginFrame:endFrame] = data[beginFrame:endFrame] \
+ window * frameTMP
# remove the extra bit before data that was - supposedly - added
# in the stft computation:
data = data[(lengthWindow / 2.0):]
return data
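# Illustrative sketch (demo-only signal): a round trip stft() -> istft() with
# the complementary sinebell windows reconstructs the input up to the constant
# overlap-add gain; main() divides the resynthesised signals by 4.0 for these
# window/hop settings.
def _demo_stft_istft_roundtrip():
    fs = 44100.0
    data = np.random.randn(8 * 2048)
    X, F, N = stft(data, window=sinebell(2048), hopsize=256.0,
                   nfft=2048.0, fs=fs)
    rec = istft(X, window=sinebell(2048), hopsize=256.0, nfft=2048.0) / 4.0
    return data, rec[:data.size]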
# DEFINING THE FUNCTIONS TO CREATE THE 'BASIS' WF0
def generate_WF0_chirped(minF0, maxF0, Fs, Nfft=2048, stepNotes=4, \
lengthWindow=2048, Ot=0.5, perF0=2, \
depthChirpInSemiTone=0.5, loadWF0=True,
analysisWindow='hanning'):
"""
F0Table, WF0 = generate_WF0_chirped(minF0, maxF0, Fs, Nfft=2048,
stepNotes=4, lengthWindow=2048,
Ot=0.5, perF0=2,
depthChirpInSemiTone=0.5)
Generates a 'basis' matrix for the source part WF0, using the
source model KLGLOTT88, with the following I/O arguments:
Inputs:
minF0 the minimum value for the fundamental
frequency (F0)
maxF0 the maximum value for F0
Fs the desired sampling rate
Nfft the number of bins to compute the Fourier
transform
stepNotes the number of F0 per semitone
lengthWindow the size of the window for the Fourier
transform
Ot the glottal opening coefficient for
KLGLOTT88
perF0 the number of chirps considered per F0
value
depthChirpInSemiTone the maximum value, in semitone, of the
allowed chirp per F0
Outputs:
F0Table the vector containing the values of the fundamental
frequencies in Hertz (Hz) corresponding to the
harmonic combs in WF0, i.e. the columns of WF0
WF0 the basis matrix, where each column is a harmonic comb
generated by KLGLOTT88 (with a sinusoidal model, then
transformed into the spectral domain)
"""
# generating a filename to keep data:
filename = str('').join(['wf0_',
'_minF0-', str(minF0),
'_maxF0-', str(maxF0),
'_Fs-', str(Fs),
'_Nfft-', str(Nfft),
'_stepNotes-', str(stepNotes),
'_Ot-', str(Ot),
'_perF0-', str(perF0),
'_depthChirp-', str(depthChirpInSemiTone),
'_analysisWindow-', analysisWindow,
'.npz'])
if os.path.isfile(filename) and loadWF0:
struc = np.load(filename)
return struc['F0Table'], struc['WF0']
# converting to double arrays:
minF0=np.double(minF0)
maxF0=np.double(maxF0)
Fs=np.double(Fs)
stepNotes=np.double(stepNotes)
# computing the F0 table:
numberOfF0 = np.ceil(12.0 * stepNotes * np.log2(maxF0 / minF0)) + 1
F0Table=minF0 * (2 ** (np.arange(numberOfF0,dtype=np.double) \
/ (12 * stepNotes)))
numberElementsInWF0 = numberOfF0 * perF0
# computing the desired WF0 matrix
WF0 = np.zeros([Nfft, numberElementsInWF0],dtype=np.double)
for fundamentalFrequency in np.arange(numberOfF0):
odgd, odgdSpec = \
generate_ODGD_spec(F0Table[fundamentalFrequency], Fs, \
Ot=Ot, lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0,\
analysisWindowType=analysisWindow) # 20100924 trying with hann window
WF0[:,fundamentalFrequency * perF0] = np.abs(odgdSpec) ** 2
for chirpNumber in np.arange(perF0 - 1):
F2 = F0Table[fundamentalFrequency] \
* (2 ** ((chirpNumber + 1.0) * depthChirpInSemiTone \
/ (12.0 * (perF0 - 1.0))))
# F0 is the mean of F1 and F2.
F1 = 2.0 * F0Table[fundamentalFrequency] - F2
odgd, odgdSpec = \
generate_ODGD_spec_chirped(F1, F2, Fs, \
Ot=Ot, \
lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0)
WF0[:,fundamentalFrequency * perF0 + chirpNumber + 1] = \
np.abs(odgdSpec) ** 2
np.savez(filename, F0Table=F0Table, WF0=WF0)
return F0Table, WF0
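# Illustrative usage sketch: building the source dictionary WF0 with the same
# F0 range and analysis settings that main() uses by default; loadWF0=False
# forces a recomputation, and the result is cached to an .npz file.
def _demo_generate_WF0():
    F0Table, WF0 = generate_WF0_chirped(minF0=100, maxF0=800, Fs=44100.0,
                                        Nfft=2048, stepNotes=20,
                                        lengthWindow=2048, Ot=0.25,
                                        perF0=1, loadWF0=False,
                                        analysisWindow='sinebell')
    # Column u of WF0 holds the power spectrum of a harmonic comb whose
    # fundamental frequency is F0Table[u] (perF0=1, so no chirped variants).
    return F0Table, WF0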
def generate_ODGD_spec(F0, Fs, lengthOdgd=2048, Nfft=2048, Ot=0.5, \
t0=0.0, analysisWindowType='sinebell'):
"""
generateODGDspec:
generates a waveform ODGD and the corresponding spectrum,
using as analysis window the -optional- window given as
argument.
"""
# converting input to double:
F0 = np.double(F0)
Fs = np.double(Fs)
Ot = np.double(Ot)
t0 = np.double(t0)
# compute analysis window of given type:
if analysisWindowType=='sinebell':
analysisWindow = sinebell(lengthOdgd)
else:
        if analysisWindowType=='hanning' or \
           analysisWindowType=='hann':
analysisWindow = hann(lengthOdgd)
# maximum number of partials in the spectral comb:
partialMax = np.floor((Fs / 2) / F0)
# Frequency numbers of the partials:
frequency_numbers = np.arange(1,partialMax + 1)
# intermediate value
temp_array = 1j * 2.0 * np.pi * frequency_numbers * Ot
# compute the amplitudes for each of the frequency peaks:
amplitudes = F0 * 27 / 4 \
* (np.exp(-temp_array) \
+ (2 * (1 + 2 * np.exp(-temp_array)) / temp_array) \
- (6 * (1 - np.exp(-temp_array)) \
/ (temp_array ** 2))) \
/ temp_array
# Time stamps for the time domain ODGD
timeStamps = np.arange(lengthOdgd) / Fs + t0 / F0
# Time domain odgd:
odgd = np.exp(np.outer(2.0 * 1j * np.pi * F0 * frequency_numbers, \
timeStamps)) \
* np.outer(amplitudes, np.ones(lengthOdgd))
odgd = np.sum(odgd, axis=0)
# spectrum:
odgdSpectrum = np.fft.fft(np.real(odgd * analysisWindow), n=Nfft)
return odgd, odgdSpectrum
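# Illustrative sketch (demo-only parameter values): one frame of the KLGLOTT88
# glottal source at F0 = 220 Hz; its squared magnitude spectrum is what
# generate_WF0_chirped stores in the columns of WF0.
def _demo_generate_ODGD_spec():
    odgd, odgdSpec = generate_ODGD_spec(220.0, 44100.0, lengthOdgd=2048,
                                        Nfft=2048, Ot=0.25,
                                        analysisWindowType='hanning')
    return np.abs(odgdSpec) ** 2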
def generate_ODGD_spec_chirped(F1, F2, Fs, lengthOdgd=2048, Nfft=2048, \
Ot=0.5, t0=0.0, \
analysisWindowType='sinebell'):
"""
generateODGDspecChirped:
generates a waveform ODGD and the corresponding spectrum,
using as analysis window the -optional- window given as
argument.
"""
# converting input to double:
F1 = np.double(F1)
F2 = np.double(F2)
F0 = np.double(F1 + F2) / 2.0
Fs = np.double(Fs)
Ot = np.double(Ot)
t0 = np.double(t0)
# compute analysis window of given type:
if analysisWindowType == 'sinebell':
analysisWindow = sinebell(lengthOdgd)
else:
if analysisWindowType == 'hanning' or \
analysisWindowType == 'hann':
analysisWindow = hann(lengthOdgd)
# maximum number of partials in the spectral comb:
    partialMax = np.floor((Fs / 2) / max(F1, F2))
# Frequency numbers of the partials:
frequency_numbers = np.arange(1,partialMax + 1)
# intermediate value
temp_array = 1j * 2.0 * np.pi * frequency_numbers * Ot
# compute the amplitudes for each of the frequency peaks:
amplitudes = F0 * 27 / 4 * \
(np.exp(-temp_array) \
+ (2 * (1 + 2 * np.exp(-temp_array)) / temp_array) \
- (6 * (1 - np.exp(-temp_array)) \
/ (temp_array ** 2))) \
/ temp_array
# Time stamps for the time domain ODGD
timeStamps = np.arange(lengthOdgd) / Fs + t0 / F0
# Time domain odgd:
odgd = np.exp(2.0 * 1j * np.pi \
* (np.outer(F1 * frequency_numbers,timeStamps) \
+ np.outer((F2 - F1) \
* frequency_numbers,timeStamps ** 2) \
/ (2 * lengthOdgd / Fs))) \
* np.outer(amplitudes,np.ones(lengthOdgd))
odgd = np.sum(odgd,axis=0)
# spectrum:
    odgdSpectrum = np.fft.fft(np.real(odgd * analysisWindow), n=Nfft)
return odgd, odgdSpectrum
def generateHannBasis(numberFrequencyBins, sizeOfFourier, Fs, \
frequencyScale='linear', numberOfBasis=20, \
overlap=.75):
isScaleRecognized = False
if frequencyScale == 'linear':
# number of windows generated:
numberOfWindowsForUnit = np.ceil(1.0 / (1.0 - overlap))
# recomputing the overlap to exactly fit the entire
# number of windows:
overlap = 1.0 - 1.0 / np.double(numberOfWindowsForUnit)
# length of the sine window - that is also to say: bandwidth
# of the sine window:
lengthSineWindow = np.ceil(numberFrequencyBins \
/ ((1.0 - overlap) \
* (numberOfBasis - 1) + 1 \
- 2.0 * overlap))
# even window length, for convenience:
lengthSineWindow = 2.0 * np.floor(lengthSineWindow / 2.0)
# for later compatibility with other frequency scales:
mappingFrequency = np.arange(numberFrequencyBins)
# size of the "big" window
sizeBigWindow = 2.0 * numberFrequencyBins
# centers for each window
## the first window is centered at, in number of window:
firstWindowCenter = -numberOfWindowsForUnit + 1
## and the last is at
lastWindowCenter = numberOfBasis - numberOfWindowsForUnit + 1
## center positions in number of frequency bins
sineCenters = np.round(\
np.arange(firstWindowCenter, lastWindowCenter) \
* (1 - overlap) * np.double(lengthSineWindow) \
+ lengthSineWindow / 2.0)
# For future purpose: to use different frequency scales
isScaleRecognized = True
# For frequency scale in logarithm (such as ERB scales)
if frequencyScale == 'log':
isScaleRecognized = False
# checking whether the required scale is recognized
if not(isScaleRecognized):
print "The desired feature for frequencyScale is not recognized yet..."
return 0
# the shape of one window:
prototypeSineWindow = hann(lengthSineWindow)
# adding zeroes on both sides, such that we do not need to check
# for boundaries
bigWindow = np.zeros([sizeBigWindow * 2, 1])
bigWindow[(sizeBigWindow - lengthSineWindow / 2.0):\
(sizeBigWindow + lengthSineWindow / 2.0)] \
= np.vstack(prototypeSineWindow)
WGAMMA = np.zeros([numberFrequencyBins, numberOfBasis])
for p in np.arange(numberOfBasis):
WGAMMA[:, p] = np.hstack(bigWindow[np.int32(mappingFrequency \
- sineCenters[p] \
+ sizeBigWindow)])
return WGAMMA
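# Illustrative usage sketch: the smooth-filter dictionary WGAMMA as built in
# main(), i.e. F = NFT/2+1 = 1025 frequency bins for NFT = 2048 and P = 30
# overlapping Hann atoms on a linear frequency scale.
def _demo_generateHannBasis():
    WGAMMA = generateHannBasis(1025, 2048, Fs=44100.0,
                               frequencyScale='linear', numberOfBasis=30,
                               overlap=.75)
    # WGAMMA has shape (1025, 30); each column is a smooth Hann-shaped
    # band-pass weighting over the frequency axis.
    return WGAMMA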
# MAIN FUNCTION, FOR DEFAULT BEHAVIOUR IF THE SCRIPT IS "LAUNCHED"
def main():
import optparse
usage = "usage: %prog [options] inputAudioFile"
parser = optparse.OptionParser(usage)
# Name of the output files:
parser.add_option("-v", "--vocal-output-file",
dest="voc_output_file", type="string",
help="name of the audio output file for the estimated\n"\
"solo (vocal) part",
default="estimated_solo.wav")
parser.add_option("-m", "--music-output-file",
dest="mus_output_file", type="string",
help="name of the audio output file for the estimated\n"\
"music part",
default="estimated_music.wav")
parser.add_option("-p", "--pitch-output-file",
dest="pitch_output_file", type="string",
help="name of the output file for the estimated pitches",
default="pitches.txt")
# Some more optional options:
parser.add_option("-d", "--with-display", dest="displayEvolution",
action="store_true",help="display the figures",
default=False)
parser.add_option("-q", "--quiet", dest="verbose",
action="store_false",
help="use to quiet all output verbose",
default=True)
parser.add_option("--nb-iterations", dest="nbiter",
help="number of iterations", type="int",
default=100)
parser.add_option("--window-size", dest="windowSize", type="float",
default=0.04644,help="size of analysis windows, in s.")
parser.add_option("--Fourier-size", dest="fourierSize", type="int",
default=2048,
help="size of Fourier transforms, "\
"in samples.")
parser.add_option("--hopsize", dest="hopsize", type="float",
default=0.0058,
help="size of the hop between analysis windows, in s.")
parser.add_option("--nb-accElements", dest="R", type="float",
default=40.0,
help="number of elements for the accompaniment.")
parser.add_option("--with-melody", dest="melody", type="string",
default=None,
help="provide the melody in a file named MELODY, "\
"with at each line: <time (s)><F0 (Hz)>.")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments, use option -h for help.")
displayEvolution = options.displayEvolution
if displayEvolution:
import matplotlib.pyplot as plt
import imageMatlab
## plt.rc('text', usetex=True)
plt.rc('image',cmap='jet') ## gray_r
plt.ion()
# Compulsory option: name of the input file:
inputAudioFile = args[0]
fs, data = wav.read(inputAudioFile)
# data = np.double(data) / 32768.0 # makes data vary from -1 to 1
scaleData = 1.2 * data.max() # to rescale the data.
dataType = data.dtype
data = np.double(data) / scaleData # makes data vary from -1 to 1
tmp = np.zeros((data.size, 2))
tmp[:,0] = data
tmp[:,1] = data
data = tmp
    if data.shape[0] == data.size: # data is 1-D, i.e. mono
print "The audio file is not stereo. Try separateLead.py instead."
raise ValueError("number of dimensions of the input not 2")
if data.shape[1] != 2:
print "The data is multichannel, but not stereo... \n"
print "Unfortunately this program does not scale well. Data is \n"
print "reduced to its 2 first channels.\n"
data = data[:,0:2]
# Processing the options:
windowSizeInSamples = np.round(options.windowSize * fs)
hopsize = np.round(options.hopsize * fs)
NFT = options.fourierSize
niter = options.nbiter
R = options.R
if options.verbose:
print "Some parameter settings:"
print " Size of analysis windows: ", windowSizeInSamples
print " Hopsize: ", hopsize
print " Size of Fourier transforms: ", NFT
print " Number of iterations to be done: ", niter
print " Number of elements in WM: ", R
XR, F, N = stft(data[:,0], fs=fs, hopsize=hopsize,
window=sinebell(windowSizeInSamples), nfft=NFT)
XL, F, N = stft(data[:,1], fs=fs, hopsize=hopsize,
window=sinebell(windowSizeInSamples), nfft=NFT)
# SX is the power spectrogram:
## SXR = np.maximum(np.abs(XR) ** 2, 10 ** -8)
## SXL = np.maximum(np.abs(XL) ** 2, 10 ** -8)
SXR = np.abs(XR) ** 2
SXL = np.abs(XL) ** 2
del data, F, N
# TODO: also process these as options:
eps = 10 ** -9
minF0 = 100
maxF0 = 800
Fs = fs
F, N = SXR.shape
stepNotes = 20 # this is the number of F0s within one semitone
# until 17/09/2010 : stepNotes = 20
# 17/09/2010 : trying stepNotes = 8, checking for less artefacts
K = 10 # number of spectral shapes for the filter part
# R = 40 # number of spectral shapes for the accompaniment
P = 30 # number of elements in dictionary of smooth filters
chirpPerF0 = 1 # number of chirped spectral shapes between each F0
# this feature should be further studied before
# we find a good way of doing that.
# Create the harmonic combs, for each F0 between minF0 and maxF0:
F0Table, WF0 = \
generate_WF0_chirped(minF0, maxF0, Fs, Nfft=NFT, \
stepNotes=stepNotes, \
lengthWindow=windowSizeInSamples, Ot=0.25, \
perF0=chirpPerF0, \
depthChirpInSemiTone=.15, loadWF0=True,\
analysisWindow='sinebell')
WF0 = WF0[0:F, :] # ensure same size as SX
NF0 = F0Table.size # number of harmonic combs
# Normalization:
WF0 = WF0 / np.outer(np.ones(F), np.amax(WF0, axis=0))
# Create the dictionary of smooth filters, for the filter part of
# the lead isntrument:
WGAMMA = generateHannBasis(F, NFT, Fs=fs, frequencyScale='linear', \
numberOfBasis=P, overlap=.75)
if displayEvolution:
plt.figure(1);plt.clf()
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel(r'Frame number $n$', fontsize=16)
plt.ylabel(r'Leading source number $u$', fontsize=16)
plt.ion()
# plt.show()
## the following seems superfluous if mpl's backend is macosx...
## raw_input("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
## "!! Press Return to resume the program. !!\n"\
## "!! Be sure that the figure has been !!\n"\
## "!! already displayed, so that the !!\n"\
## "!! evolution of HF0 will be visible. !!\n"\
## "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
if options.melody is None:
## section to estimate the melody, on monophonic algo:
SX = np.maximum(np.abs((XR + XL) / 2.0) ** 2, 10 ** -8)
# First round of parameter estimation:
HGAMMA, HPHI, HF0, HM, WM, recoError1 = SIMM.SIMM(
# the data to be fitted to:
SX,
# the basis matrices for the spectral combs
WF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=K, numberOfAccompanimentSpectralShapes=R,
# putting only 2 elements in accompaniment for a start...
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=None,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=niter, updateRulePower=1.,
stepNotes=stepNotes,
lambdaHF0 = 0.0 / (1.0 * SX.max()), alphaHF0=0.9,
verbose=options.verbose, displayEvolution=displayEvolution)
if displayEvolution:
h2 = plt.figure(2);plt.clf();
imageMatlab.imageM(20 * np.log10(HF0))
matMax = (20 * np.log10(HF0)).max()
matMed = np.median(20 * np.log10(HF0))
plt.clim([matMed - 100, matMax])
# Viterbi decoding to estimate the predominant fundamental
# frequency line
scale = 1.0
transitions = np.exp(-np.floor(np.arange(0,NF0) / stepNotes) * scale)
cutoffnote = 2 * 5 * stepNotes
transitions[cutoffnote:] = transitions[cutoffnote - 1]
transitionMatrixF0 = np.zeros([NF0 + 1, NF0 + 1]) # toeplitz matrix
b = np.arange(NF0)
transitionMatrixF0[0:NF0, 0:NF0] = \
transitions[\
np.array(np.abs(np.outer(np.ones(NF0), b) \
- np.outer(b, np.ones(NF0))), dtype=int)]
pf_0 = transitions[cutoffnote - 1] * 10 ** (-90)
p0_0 = transitions[cutoffnote - 1] * 10 ** (-100)
p0_f = transitions[cutoffnote - 1] * 10 ** (-80)
transitionMatrixF0[0:NF0, NF0] = pf_0
transitionMatrixF0[NF0, 0:NF0] = p0_f
transitionMatrixF0[NF0, NF0] = p0_0
sumTransitionMatrixF0 = np.sum(transitionMatrixF0, axis=1)
transitionMatrixF0 = transitionMatrixF0 \
/ np.outer(sumTransitionMatrixF0, \
np.ones(NF0 + 1))
priorProbabilities = 1 / (NF0 + 1.0) * np.ones([NF0 + 1])
logHF0 = np.zeros([NF0 + 1, N])
normHF0 = np.amax(HF0, axis=0)
barHF0 = np.array(HF0)
logHF0[0:NF0, :] = np.log(barHF0)
logHF0[0:NF0, normHF0==0] = np.amin(logHF0[logHF0>-np.Inf])
logHF0[NF0, :] = np.maximum(np.amin(logHF0[logHF0>-np.Inf]),-100)
indexBestPath = viterbiTrackingArray(\
logHF0, np.log(priorProbabilities),
np.log(transitionMatrixF0), verbose=options.verbose)
if displayEvolution:
h2.hold(True)
plt.plot(indexBestPath, '-b')
h2.hold(False)
plt.axis('tight')
## raw_input("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
## "!! Press Return to resume the program !!\n"\
## "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
del logHF0
# detection of silences:
HF00 = np.zeros([NF0 * chirpPerF0, N])
scopeAllowedHF0 = 2.0 / 1.0
dim1index = np.array(\
np.maximum(\
np.minimum(\
np.outer(chirpPerF0 * indexBestPath,
np.ones(chirpPerF0 \
* (2 \
* np.floor(stepNotes / scopeAllowedHF0) \
+ 1))) \
+ np.outer(np.ones(N),
np.arange(-chirpPerF0 \
* np.floor(stepNotes / scopeAllowedHF0),
chirpPerF0 \
* (np.floor(stepNotes / scopeAllowedHF0) \
+ 1))),
chirpPerF0 * NF0 - 1),
0),
dtype=int).reshape(1, N * chirpPerF0 \
* (2 * np.floor(stepNotes / scopeAllowedHF0) \
+ 1))
dim2index = np.outer(np.arange(N),
np.ones(chirpPerF0 \
* (2 * np.floor(stepNotes \
/ scopeAllowedHF0) + 1), \
dtype=int)\
).reshape(1, N * chirpPerF0 \
* (2 * np.floor(stepNotes \
/ scopeAllowedHF0) \
+ 1))
HF00[dim1index, dim2index] = HF0[dim1index, dim2index]# HF0.max()
HF00[:, indexBestPath == (NF0 - 1)] = 0.0
HF00[:, indexBestPath == 0] = 0.0
thres_energy = 0.000584
SF0 = np.maximum(np.dot(WF0, HF00), eps)
SPHI = np.maximum(np.dot(WGAMMA, np.dot(HGAMMA, HPHI)), eps)
SM = np.maximum(np.dot(WM, HM), eps)
hatSX = np.maximum(SPHI * SF0 + SM, eps)
energyMel = np.sum(np.abs((SPHI * SF0)/hatSX * \
(XR+XL) * 0.5) \
** 2, axis=0)
energyMelSorted = np.sort(energyMel)
energyMelCumul = np.cumsum(energyMelSorted)
energyMelCumulNorm = energyMelCumul / max(energyMelCumul[-1], eps)
# normalized to the maximum of energy:
# expressed in 0.01 times the percentage
ind_999 = np.nonzero(energyMelCumulNorm>thres_energy)[0][0]
if ind_999 is None:
ind_999 = N
melNotPresent = (energyMel <= energyMelCumulNorm[ind_999])
indexBestPath[melNotPresent] = 0
else:
## take the provided melody line:
# load melody from file:
melodyFromFile = np.loadtxt(options.melody)
sizeProvidedMel = melodyFromFile.shape
if len(sizeProvidedMel) == 1:
print "The melody should be provided as <Time (s)><F0 (Hz)>."
raise ValueError("Bad melody format")
melTimeStamps = melodyFromFile[:,0] # + 1024 / np.double(Fs)
melFreqHz = melodyFromFile[:,1]
if minF0 > melFreqHz[melFreqHz>40.0].min() or maxF0 < melFreqHz.max():
minF0 = melFreqHz[melFreqHz>40.0].min() *.97
maxF0 = np.maximum(melFreqHz.max()*1.03, 2*minF0 * 1.03)
print "Recomputing the source basis for "
print "minF0 = ", minF0, "Hz and maxF0 = ", maxF0, "Hz."
# Create the harmonic combs, for each F0 between minF0 and maxF0:
F0Table, WF0 = \
generate_WF0_chirped(minF0, maxF0, Fs, Nfft=NFT, \
stepNotes=stepNotes, \
lengthWindow=windowSizeInSamples,
Ot=0.25, \
perF0=chirpPerF0, \
depthChirpInSemiTone=.15)
WF0 = WF0[0:F, :] # ensure same size as SX
NF0 = F0Table.size # number of harmonic combs
# Normalization:
WF0 = WF0 / np.outer(np.ones(F), np.amax(WF0, axis=0))
sigTimeStamps = np.arange(N) * hopsize / np.double(Fs)
distMatTimeStamps = np.abs(np.outer(np.ones(sizeProvidedMel[0]),
sigTimeStamps) -
np.outer(melTimeStamps, np.ones(N)))
minDistTimeStamps = distMatTimeStamps.argmin(axis=0)
f0BestPath = melFreqHz[minDistTimeStamps]
distMatF0 = np.abs(np.outer(np.ones(NF0), f0BestPath) -
np.outer(F0Table, np.ones(N)))
indexBestPath = distMatF0.argmin(axis=0)
# setting silences to 0, with tolerance = 1/2 window length
indexBestPath[distMatTimeStamps[minDistTimeStamps,range(N)] >= \
0.5 * options.windowSize] = 0
indexBestPath[f0BestPath<=0] = 0
freqMelody = F0Table[np.array(indexBestPath,dtype=int)]
freqMelody[indexBestPath==0] = - freqMelody[indexBestPath==0]
np.savetxt(options.pitch_output_file,
np.array([np.arange(N) * hopsize / np.double(Fs),
freqMelody]).T)
# Second round of parameter estimation, with specific
# initial HF00:
HF00 = np.zeros([NF0 * chirpPerF0, N])
scopeAllowedHF0 = 2.0 / 1.0
# indexes for HF00:
# TODO: reprogram this with a 'where'?...
dim1index = np.array(\
np.maximum(\
np.minimum(\
np.outer(chirpPerF0 * indexBestPath,
np.ones(chirpPerF0 \
* (2 \
* np.floor(stepNotes / scopeAllowedHF0) \
+ 1))) \
+ np.outer(np.ones(N),
np.arange(-chirpPerF0 \
* np.floor(stepNotes / scopeAllowedHF0),
chirpPerF0 \
* (np.floor(stepNotes / scopeAllowedHF0) \
+ 1))),
chirpPerF0 * NF0 - 1),
0),
dtype=int)
dim1index = dim1index[indexBestPath!=0,:]
## dim1index = dim1index.reshape(1, N * chirpPerF0 \
## * (2 * np.floor(stepNotes / scopeAllowedHF0) \
## + 1))
dim1index = dim1index.reshape(1,dim1index.size)
dim2index = np.outer(np.arange(N),
np.ones(chirpPerF0 \
* (2 * np.floor(stepNotes \
/ scopeAllowedHF0) + 1), \
dtype=int)\
)
dim2index = dim2index[indexBestPath!=0,:]
dim2index = dim2index.reshape(1,dim2index.size)
## dim2index.reshape(1, N * chirpPerF0 \
## * (2 * np.floor(stepNotes \
## / scopeAllowedHF0) \
## + 1))
HF00[dim1index, dim2index] = 1 # HF0.max()
HF00[:, indexBestPath == (NF0 - 1)] = 0.0
HF00[:, indexBestPath == 0] = 0.0
WF0effective = WF0
HF00effective = HF00
if options.melody is None:
del HF0, HGAMMA, HPHI, HM, WM, HF00, SX
alphaR, alphaL, HGAMMA, HPHI, HF0, \
betaR, betaL, HM, WM, recoError2 = SIMM.Stereo_SIMM(
# the data to be fitted to:
SXR, SXL,
# the basis matrices for the spectral combs
WF0effective,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=K, numberOfAccompanimentSpectralShapes=R,
# if any, initial amplitude matrices for
HGAMMA0=None, HPHI0=None,
HF00=HF00effective,
WM0=None, HM0=None,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=niter, updateRulePower=1.0,
stepNotes=stepNotes,
lambdaHF0 = 0.0 / (1.0 * SXR.max()), alphaHF0=0.9,
verbose=options.verbose, displayEvolution=displayEvolution)
WPHI = np.dot(WGAMMA, HGAMMA)
SPHI = np.dot(WPHI, HPHI)
SF0 = np.dot(WF0effective, HF0)
hatSXR = (alphaR**2) * SF0 * SPHI + np.dot(np.dot(WM, betaR**2),HM)
hatSXL = (alphaL**2) * SF0 * SPHI + np.dot(np.dot(WM, betaL**2),HM)
hatVR = (alphaR**2) * SPHI * SF0 / hatSXR * XR
vestR = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatVR = (alphaL**2) * SPHI * SF0 / hatSXL * XL
vestL = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
#scikits.audiolab.wavwrite(np.array([vestR,vestL]).T, \
# options.voc_output_file, fs)
vestR = np.array(np.round(vestR*scaleData), dtype=dataType)
vestL = np.array(np.round(vestL*scaleData), dtype=dataType)
wav.write(options.voc_output_file, fs, \
np.array([vestR,vestL]).T)
#wav.write(options.voc_output_file, fs, \
# np.int16(32768.0 * np.array([vestR,vestL]).T))
hatMR = (np.dot(np.dot(WM,betaR ** 2),HM)) / hatSXR * XR
mestR = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatMR = (np.dot(np.dot(WM,betaL ** 2),HM)) / hatSXL * XL
mestL = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
#scikits.audiolab.wavwrite(np.array([mestR,mestL]).T, \
# options.mus_output_file, fs)
mestR = np.array(np.round(mestR*scaleData), dtype=dataType)
mestL = np.array(np.round(mestL*scaleData), dtype=dataType)
wav.write(options.mus_output_file, fs, \
np.array([mestR,mestL]).T)
#wav.write(options.mus_output_file, fs, \
# np.int16(32768.0 * np.array([mestR,mestL]).T))
del hatMR, mestL, vestL, vestR, mestR, hatVR, hatSXR, hatSXL, SPHI, SF0
# adding the unvoiced part in the source basis:
WUF0 = np.hstack([WF0, np.ones([WF0.shape[0], 1])])
HUF0 = np.vstack([HF0, np.ones([1, HF0.shape[1]])])
## HUF0[-1,:] = HF0.sum(axis=0) # should we do this?
alphaR, alphaL, HGAMMA, HPHI, HF0, \
betaR, betaL, HM, WM, recoError3 = SIMM.Stereo_SIMM(
# the data to be fitted to:
SXR, SXL,
# the basis matrices for the spectral combs
WUF0,
# and for the elementary filters:
WGAMMA,
# number of desired filters, accompaniment spectra:
numberOfFilters=K, numberOfAccompanimentSpectralShapes=R,
# if any, initial amplitude matrices for
HGAMMA0=HGAMMA, HPHI0=HPHI,
HF00=HUF0,
WM0=None,#WM,
HM0=None,#HM,
# Some more optional arguments, to control the "convergence"
# of the algo
numberOfIterations=niter, updateRulePower=1.0,
stepNotes=stepNotes,
lambdaHF0 = 0.0 / (1.0 * SXR.max()), alphaHF0=0.9,
verbose=options.verbose, displayEvolution=displayEvolution,
updateHGAMMA=False)
WPHI = np.dot(WGAMMA, HGAMMA)
SPHI = np.dot(WPHI, HPHI)
SF0 = np.dot(WUF0, HF0)
hatSXR = (alphaR**2) * SF0 * SPHI + np.dot(np.dot(WM, betaR**2),HM)
hatSXL = (alphaL**2) * SF0 * SPHI + np.dot(np.dot(WM, betaL**2),HM)
hatVR = (alphaR**2) * SPHI * SF0 / hatSXR * XR
vestR = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatVR = (alphaL**2) * SPHI * SF0 / hatSXL * XL
vestL = istft(hatVR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
outputFileName = options.voc_output_file[:-4] + '_VUIMM.wav'
# scikits.audiolab.wavwrite(np.array([vestR,vestL]).T, outputFileName, fs)
vestR = np.array(np.round(vestR*scaleData), dtype=dataType)
vestL = np.array(np.round(vestL*scaleData), dtype=dataType)
wav.write(outputFileName, fs, \
np.array([vestR,vestL]).T)
hatMR = (np.dot(np.dot(WM,betaR ** 2),HM)) / hatSXR * XR
mestR = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
hatMR = (np.dot(np.dot(WM,betaL ** 2),HM)) / hatSXL * XL
mestL = istft(hatMR, hopsize=hopsize, nfft=NFT,
window=sinebell(windowSizeInSamples)) / 4.0
outputFileName = options.mus_output_file[:-4] + '_VUIMM.wav'
#scikits.audiolab.wavwrite(np.array([mestR,mestL]).T, outputFileName, fs)
mestR = np.array(np.round(mestR*scaleData), dtype=dataType)
mestL = np.array(np.round(mestL*scaleData), dtype=dataType)
wav.write(outputFileName, fs, \
np.array([mestR,mestL]).T)
if displayEvolution:
plt.close('all')
## raw_input("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
## "!! Press Return to end the program... !!\n"\
## "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print "Done!"
if __name__ == '__main__':
main()
| mit | 3,770,763,837,554,159,600 | 40.138916 | 104 | 0.531349 | false |
zyga/taperecorder | taperecorder/tests.py | 1 | 9488 | from __future__ import print_function
from unittest import TestCase
from collections import deque
from taperecorder import create_observer_cls
from taperecorder import Action, ReactingCall
def throw(exc):
"""
    Helper function that lets ``raise`` be used as an expression (e.g. inside
    a lambda), rather than only as a statement.
"""
raise exc
class AssertingReaction(object):
"""
Reaction class for testing.
Creates a reaction object that uses TestCase assertions to expect a
sequence of actions. Allows to customize each reaction with a callback.
"""
def __init__(self, testcase, auto_descriptors=False):
self.testcase = testcase
self.chain = deque()
self.auto_descriptors = auto_descriptors
def add(self, action, callback):
"""
Add a callback reaction to an expected action
"""
self.chain.append((action, callback))
def to(self, action):
"""
Provide action with a reaction
"""
# If auto_descriptors are enabled then service all descriptor calls
# automatically without cluttering the expected action chain.
if self.auto_descriptors:
if action.origin in ('__get__', '__set__', '__delete__'):
return ReactingCall(action.name, self)
# Pop the next item from the expected action / reaction chain
expected_action, callback = self.chain.popleft()
# Do some fine-tuned testing for easier error messages
self.testcase.assertEqual(action.origin, expected_action.origin)
self.testcase.assertEqual(action.name, expected_action.name)
self.testcase.assertEqual(action.args, expected_action.args)
self.testcase.assertEqual(action.kwargs, expected_action.kwargs)
# But test everything in case action gets changed
self.testcase.assertEqual(action, expected_action)
# Return what the callback did
return callback()
class ObserverTests(TestCase):
def setUp(self):
# The asserting reaction instance.
# We feed this puppy with assertion data, it checks things
# as we are using the object created later.
self.reaction = AssertingReaction(self)
# Automatically service descriptor calls
self.reaction.auto_descriptors = True
        # The observer class that talks to the reaction instance above. We don't
        # use the class directly, but we need a per-reaction-instance class
        # because methods on the metaclass must be able to see the reaction
        # instance.
self.cls = create_observer_cls(self.reaction)
# Spawn the observer, __all__ interaction on the observer gets
# delegated to the reaction class.
self.obj = self.cls()
def tearDown(self):
# As a part of the test process, check that we've used all of the
# reactions. If we didn't then the test did not really do what was
# assumed.
self.assertEqual(len(self.reaction.chain), 0,
"Not all reactions were used")
def test_str(self):
# Expect the method call on __str__()
self.reaction.add(
action=Action('__call__', '__str__', (), {}),
callback=lambda: "foo")
# Make sure we got it right
self.assertEqual(str(self.obj), "foo")
def test_repr(self):
# Expect the method call on __repr__()
self.reaction.add(
action=Action('__call__', '__repr__', (), {}),
callback=lambda: "<foo>")
# Make sure we got it right
self.assertEqual(repr(self.obj), "<foo>")
def test_setattr(self):
# Expect the method call on __setattr__('foo', 5)
self.reaction.add(
action=Action('__call__', '__setattr__', ('foo', 5), {}),
callback=lambda: None)
# Make sure we got it right
self.obj.foo = 5
def test_getattr(self):
# Expect the method call __getattr__('foo')
self.reaction.add(
action=Action('__call__', '__getattr__', ('foo',), {}),
callback=lambda: 5)
# Make sure we got it right
self.assertEqual(self.obj.foo, 5)
def test_delattr(self):
# Expect the method call __getattr__('foo')
self.reaction.add(
action=Action('__call__', '__delattr__', ('foo',), {}),
callback=lambda: 5)
# Make sure we got it right
del self.obj.foo
def test_method_calls(self):
# Expect the method call on __getattr__('foo')
# This will produce a ReactingCall object being returned
self.reaction.add(
action=Action('__call__', '__getattr__', ('foo',), {}),
callback=lambda: ReactingCall('foo', self.reaction))
# Then expect the method call on foo(1, arg=6)
self.reaction.add(
action=Action('__call__', 'foo', (1,), {"arg": 6}),
callback=lambda: 5)
# Make sure we got it right
self.assertEqual(self.obj.foo(1, arg=6), 5)
def test_lt(self):
# Expect the method call __lt__(10)
self.reaction.add(
action=Action('__call__', '__lt__', (10,), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(self.obj < 10)
def test_le(self):
# Expect the method call __le__(10)
self.reaction.add(
action=Action('__call__', '__le__', (10,), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(self.obj <= 10)
def test_eq(self):
# Expect the method call __le__(10)
self.reaction.add(
action=Action('__call__', '__eq__', (10,), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(self.obj == 10)
def test_ne(self):
# Expect the method call __ne__(10)
self.reaction.add(
action=Action('__call__', '__ne__', (10,), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(self.obj != 10)
def test_gt(self):
# Expect the method call __gt__(10)
self.reaction.add(
action=Action('__call__', '__gt__', (10,), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(self.obj > 10)
def test_ge(self):
# Expect the method call __ge__(10)
self.reaction.add(
action=Action('__call__', '__ge__', (10,), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(self.obj >= 10)
def test_le_via_cmp(self):
# Disable automatic descriptors as we want to be able to change
# default behavior.
self.reaction.auto_descriptors = False
# First expect the 'get' descriptor access on __le__
# This must raise AttributeError for python to keep searching
self.reaction.add(
action=Action('__get__', '__le__', (self.obj, self.cls), {}),
callback=lambda: throw(AttributeError))
# Then expect the 'get' descriptor access on __cmp___
# This will produce a ReactingCall object being returned
self.reaction.add(
action=Action('__get__', '__cmp__', (self.obj, self.cls), {}),
callback=lambda: ReactingCall('__cmp__', self.reaction))
# Expect the method call __cmp__(10)
self.reaction.add(
action=Action('__call__', '__cmp__', (10,), {}),
callback=lambda: -1)
# Make sure we got it right
self.assertTrue(self.obj <= 10)
def test_getitem(self):
# Expect the method call __getitem__(10)
self.reaction.add(
action=Action('__call__', '__getitem__', (10,), {}),
callback=lambda: "foo")
# Make sure we got it right
self.assertEqual(self.obj[10], "foo")
def test_setitem(self):
# Expect the method call __setitem__(10)
self.reaction.add(
action=Action('__call__', '__setitem__', (10, "foo"), {}),
callback=lambda: None)
# Make sure we got it right
self.obj[10] = "foo"
def test_delitem(self):
# Expect the method call __delitem__(10)
self.reaction.add(
action=Action('__call__', '__delitem__', (10,), {}),
callback=lambda: None)
# Make sure we got it right
del self.obj[10]
def test_isinstance(self):
# Expect a call to __instancecheck__ to see if 'foo'
# is an instance of our observer class
self.reaction.add(
action=Action('__instancecheck__', None, ('foo',), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(isinstance('foo', self.cls))
def test_issubclass(self):
# Expect a call to __subclasscheck__ to see if 'foo'
# is a subclass of our observer class
self.reaction.add(
action=Action('__subclasscheck__', None, ('foo',), {}),
callback=lambda: True)
# Make sure we got it right
self.assertTrue(issubclass('foo', self.cls))
def test_dir_contents(self):
# Expect the method call __dir__
self.reaction.add(
Action('__call__', '__dir__', (), {}),
lambda: ['b', 'c', 'a'])
# Make sure we got it right.
# Caveat: python sorts the result internally
self.assertEqual(dir(self.obj), ['a', 'b', 'c'])
| lgpl-3.0 | 9,028,778,813,376,204,000 | 36.354331 | 79 | 0.568824 | false |
xcme/swtoolz-core | devices/DGS-3627G.py | 1 | 9900 | # coding=UTF8
# The line above is needed in case non-ASCII characters are used, for example Cyrillic.
# Correction multiplier for the timeout of SNMP operations. The slower the device CPU, the larger the multiplier should be.
# This parameter is used (if set) by swtoolz-core. The remaining parameters are defined entirely by the user.
timeout_mf = 1.2
# Port map of the device. It is represented as a list of slots. Each slot contains a list of rows. Each row contains a list of ports.
DeviceMap = ([
[
['1','3','5','7','9','11','13','15','17','19','21','23'],
['2','4','6','8','10','12','14','16','18','20','22','24','25','26','27']
],
],)
# SlotSize - the number of indexes allocated per slot. Usually this is 64, i.e. slot #1 is 1..64, slot #2 is 65..128, slot #3 is 129..192 and so on.
# ShiftIndex - the offset that must be added to the index. On some devices the first index may start, for example, at 256.
# MaxIndex - the maximum index to be processed. Indexes with higher numbers are ignored.
# ComboDefMedType - the default transmission medium type for a combo port.
StackInfo = ({
'SlotSize' : '64',
'ShiftIndex' : '0',
# 'MaxIndex' : '64',
'ComboDefMedType' : 'copper',
},)
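# Added illustration (not used by swtoolz-core itself): a small helper showing the
# slot/port index arithmetic described in the comments above, assuming the usual
# SlotSize of 64 and a ShiftIndex of 0.
def _example_port_index(slot, port, slot_size=64, shift_index=0):
    # slot #1 covers indexes 1..64, slot #2 covers 65..128, and so on
    return (slot - 1) * slot_size + port + shift_index
# e.g. _example_port_index(2, 10) -> 74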
# List of recommended commands
Commands = ([
'DeviceMap',
'StackInfo',
'MediumType',
'ActualStatus',
'ActualSpeed',
'AdminStatus',
'AdminSpeed',
'AdminFlow',
'PortType',
'BoardDescrShort',
'walk_PortIndex',
'walk_BoardDescr',
'get_HardwareRev',
'walk_ifName',
'walk_ifAlias',
],)
# swL2PortInfoMediumType
MediumType = ({
'1' : 'copper',
'2' : 'fiber',
},)
# swL2PortInfoLinkStatus
ActualStatus = ({
'1' : 'other',
'2' : 'linkup',
'3' : 'linkdown',
},)
# swL2PortInfoNwayStatus
ActualSpeed = ({
'0' : 'linkdown',
'1' : '10M-Full-8023x',
'2' : '10M-Full',
'3' : '10M-Half-backp',
'4' : '10M-Half',
'5' : '100M-Full-8023x',
'6' : '100M-Full',
'7' : '100M-Half-backp',
'8' : '100M-Half',
'9' : '1G-Full-8023x',
'10' : '1G-Full',
'11' : '1G-Half-backp',
'12' : '1G-Half',
'13' : '10G-Full-8023x',
'14' : '10G-Full',
'15' : '10G-Half-8023x',
'16' : '10G-Half',
'17' : 'empty',
'18' : 'err-disabled',
},)
# swL2PortCtrlAdminState
AdminStatus = ({
'2' : 'disabled',
'3' : 'enabled',
},)
# swL2PortCtrlNwayState
AdminSpeed = ({
'2' : 'auto',
'3' : '10M-Half',
'4' : '10M-Full',
'5' : '100M-Half',
'6' : '100M-Full',
'7' : '1G-Half',
'8' : '1G-Full',
'9' : '1G-Full-master',
'10': '1G-Full-slave',
},)
# swL2PortCtrlFlowCtrlState
AdminFlow = ({
'1' : 'other',
'2' : 'disabled',
'3' : 'enabled',
},)
# ifType
PortType = ({
'1' : 'other',
'6' : 'fastEthernet',
'117' : 'gigaEthernet',
},)
BoardDescrShort = ({
'NOT_EXIST' : '',
},)
get_HardwareRev = {
# HardwareRev .1.3.6.1.2.1.16.19.3.0 probeHardwareRev
'HardwareRev.' : '.1.3.6.1.2.1.16.19.3.0',
}
walk_PortIndex = {
# PortIndex .1.3.6.1.4.1.171.11.70.8.2.3.1.1.1 swL2PortInfoPortIndex
'PortIndex' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1',
}
walk_BoardDescr = {
# BoardBescr .1.3.6.1.4.1.171.12.11.1.9.4.1.9 swUnitMgmtModuleName
'BoardDescr' : '.1.3.6.1.4.1.171.12.11.1.9.4.1.9',
}
get_PortIndex = {
# PortIndex .1.3.6.1.4.1.171.11.70.8.2.3.1.1.1 swL2PortInfoPortIndex
'PortIndex..1' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.1.2',
'PortIndex..2' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.2.2',
'PortIndex..3' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.3.2',
'PortIndex..4' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.4.2',
'PortIndex..5' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.5.2',
'PortIndex..6' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.6.2',
'PortIndex..7' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.7.2',
'PortIndex..8' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.8.2',
'PortIndex..9' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.9.2',
'PortIndex..10' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.10.2',
'PortIndex..11' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.11.2',
'PortIndex..12' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.12.2',
'PortIndex..13' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.13.2',
'PortIndex..14' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.14.2',
'PortIndex..15' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.15.2',
'PortIndex..16' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.16.2',
'PortIndex..17' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.17.2',
'PortIndex..18' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.18.2',
'PortIndex..19' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.19.2',
'PortIndex..20' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.20.2',
'PortIndex..21c' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.21.1',
'PortIndex..21f' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.21.2',
'PortIndex..22c' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.22.1',
'PortIndex..22f' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.22.2',
'PortIndex..23c' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.23.1',
'PortIndex..23f' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.23.2',
'PortIndex..24c' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.24.1',
'PortIndex..24f' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.24.2',
'PortIndex..25' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.25.1',
'PortIndex..26' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.26.1',
'PortIndex..27' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.1.27.1',
}
get_SinglePort = {
# MediumType .1.3.6.1.4.1.171.11.70.8.2.3.1.1.2 swL2PortInfoMediumType
'MediumType..c' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.2.%s.1',
'MediumType..f' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.2.%s.2',
# ActualStatus .1.3.6.1.4.1.171.11.70.8.2.3.1.1.5 swL2PortInfoLinkStatus
'ActualStatus..c' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.5.%s.1',
'ActualStatus..f' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.5.%s.2',
# ActualSpeed .1.3.6.1.4.1.171.11.70.8.2.3.1.1.6 swL2PortInfoNwayStatus
'ActualSpeed..c' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.6.%s.1',
'ActualSpeed..f' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.6.%s.2',
# AdminStatus .1.3.6.1.4.1.171.11.70.8.2.3.2.1.4 swL2PortCtrlAdminState
'AdminStatus..c' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.4.%s.1',
'AdminStatus..f' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.4.%s.2',
# AdminSpeed .1.3.6.1.4.1.171.11.70.8.2.3.2.1.5 swL2PortCtrlNwayState
'AdminSpeed..c' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.5.%s.1',
'AdminSpeed..f' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.5.%s.2',
# AdminFlow .1.3.6.1.4.1.171.11.70.8.2.3.2.1.6 swL2PortCtrlFlowCtrlState
'AdminFlow..c' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.6.%s.1',
'AdminFlow..f' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.6.%s.2',
# PortType .1.3.6.1.2.1.2.2.1.3 ifType
'PortType.' : '.1.3.6.1.2.1.2.2.1.3.%s',
# PortName .1.3.6.1.2.1.31.1.1.1.1 ifName
'PortName.' : '.1.3.6.1.2.1.31.1.1.1.1.%s',
# PortDescr .1.3.6.1.2.1.31.1.1.1.18 ifAlias
'PortDescr.' : '.1.3.6.1.2.1.31.1.1.1.18.%s',
}
walk_AllPorts = {
# MediumType .1.3.6.1.4.1.171.11.70.8.2.3.1.1.2 swL2PortInfoMediumType
'MediumType' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.2',
# ActualStatus .1.3.6.1.4.1.171.11.70.8.2.3.1.1.5 swL2PortInfoLinkStatus
'ActualStatus' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.5',
# ActualSpeed .1.3.6.1.4.1.171.11.70.8.2.3.1.1.6 swL2PortInfoNwayStatus
'ActualSpeed' : '.1.3.6.1.4.1.171.11.70.8.2.3.1.1.6',
# AdminStatus .1.3.6.1.4.1.171.11.70.8.2.3.2.1.4 swL2PortCtrlAdminState
'AdminStatus' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.4',
# AdminSpeed .1.3.6.1.4.1.171.11.70.8.2.3.2.1.5 swL2PortCtrlNwayState
'AdminSpeed' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.5',
# AdminFlow .1.3.6.1.4.1.171.11.70.8.2.3.2.1.6 swL2PortCtrlFlowCtrlState
'AdminFlow' : '.1.3.6.1.4.1.171.11.70.8.2.3.2.1.6',
}
walk_ifName = {
# PortName .1.3.6.1.2.1.31.1.1.1.1 ifName
'PortName' : '.1.3.6.1.2.1.31.1.1.1.1',
}
walk_ifAlias = {
# PortDescr .1.3.6.1.2.1.31.1.1.1.18 ifAlias
'PortDescr' : '.1.3.6.1.2.1.31.1.1.1.18',
}
walk_FDB_VLAN = {
# FDB .1.3.6.1.2.1.17.7.1.2.2.1.2 dot1qTpFdbPort
'FDB' : '.1.3.6.1.2.1.17.7.1.2.2.1.2.%s',
}
walk_VlanMap = {
# VlanName .1.3.6.1.2.1.17.7.1.4.3.1.1 dot1qVlanStaticName
'VlanName' : '.1.3.6.1.2.1.17.7.1.4.3.1.1',
# EgressPorts .1.3.6.1.2.1.17.7.1.4.3.1.2 dot1qVlanStaticEgressPorts
'EgressPorts' : '.1.3.6.1.2.1.17.7.1.4.3.1.2',
}
walk_VlanEgressPorts = {
# VEP .1.3.6.1.2.1.17.7.1.4.3.1.2 dot1qVlanStaticEgressPorts
'VEP' : '.1.3.6.1.2.1.17.7.1.4.3.1.2',
}
walk_VlanUntaggedPorts = {
# VUP .1.3.6.1.2.1.17.7.1.4.3.1.4 dot1qVlanStaticUntaggedPorts
'VUP' : '.1.3.6.1.2.1.17.7.1.4.3.1.4',
}
| gpl-3.0 | -5,599,887,222,430,966,000 | 37.348548 | 144 | 0.525103 | false |
mmolero/pcloudpy | pcloudpy/core/filters/OrientedNormalEstimation.py | 1 | 3575 |
"""
Class that defines an oriented normal estimation method based on the PCA Eigen method for plane fitting and a minimum spanning tree for consistent orientation
"""
__all__ = ["OrientedNormalsEstimation"]
import numpy as np
from scipy.linalg import eigh
from sklearn.neighbors import NearestNeighbors
import networkx as nx
from pcloudpy.core.filters.base import FilterBase
from ..io.converters import numpy_from_polydata, copy_polydata_add_normals
class OrientedNormalsEstimation(FilterBase):
"""
    OrientedNormalsEstimation filter estimates normals of a point cloud using the PCA Eigen method to fit a plane, then orients them consistently using a minimum spanning tree
Parameters
----------
number_neighbors: int
number of neighbors to be considered in the normals estimation
Attributes
----------
input_: vtkPolyData
Input Data to be filtered
output_: vtkPolyData
Output Data
"""
def __init__(self, number_neighbors = 10):
self.number_neighbors = number_neighbors
def update(self):
array_with_color = numpy_from_polydata(self.input_)
normals = np.empty_like(array_with_color[:,0:3])
coord = array_with_color[:,0:3]
neigh = NearestNeighbors(self.number_neighbors)
neigh.fit(coord)
for i in range(0,len(coord)):
#Determine the neighbours of point
d = neigh.kneighbors(coord[i])
            # Add the coordinates of the neighbours; don't include the center point. Coordinates are looked up by the neighbour indexes.
y = np.zeros((self.number_neighbors-1,3))
y = coord[d[1][0][1:self.number_neighbors],0:3]
            # Estimate the surface normal from the neighbourhood
            # and assign it to point i
normals[i,0:3] = self.get_normals(y)
        # Get the point with the highest z value; this will be used as the starting point for the depth-first search
z_max_point = np.where(coord[:,2]== np.max(coord[:,2]))
z_max_point = int(z_max_point[0])
        if normals[z_max_point,2] < 0:  # i.e. the normal doesn't point outwards
normals[z_max_point,:]=-normals[z_max_point,:]
#Create a graph
G = nx.Graph()
        # Add all points and their neighbours to the graph; each edge weight is 1 - |dot(n1, n2)| of the neighbouring normals
for i in range(0,len(coord)):
d = neigh.kneighbors(coord[i,:3])
for c in range(1,self.number_neighbors):
p1 = d[1][0][0]
p2 = d[1][0][c]
n1 = normals[d[1][0][0],:]
n2 = normals[d[1][0][c],:]
dot = np.dot(n1,n2)
G.add_edge(p1,p2,weight =1-np.abs(dot))
T = nx.minimum_spanning_tree(G)
x=[]
for i in nx.dfs_edges(T,z_max_point):
x+=i
inds = np.where(np.diff(x))[0]
out = np.split(x,inds[np.diff(inds)==1][1::2]+1)
for j in range(0,len(out)):
for i in range(0,len(out[j])-1):
n1 = normals[out[j][i],:]
n2 = normals[out[j][i+1],:]
if np.dot(n2,n1)<0:
normals[out[j][i+1],:]=-normals[out[j][i+1],:]
self.output_ = copy_polydata_add_normals(self.input_, normals)
def get_normals(self, XYZ):
#The below code uses the PCA Eigen method to fit plane.
#Get the covariance matrix
average = np.sum(XYZ, axis=0)/XYZ.shape[0]
b = np.transpose(XYZ - average)
cov = np.cov(b)
#Get eigen val and vec
e_val,e_vect = eigh(cov, overwrite_a=True, overwrite_b=True)
norm = e_vect[:,0]
return norm | bsd-3-clause | -1,820,266,291,135,787,500 | 28.8 | 133 | 0.582098 | false |
iotile/coretools | transport_plugins/bled112/iotile_transport_bled112/broadcast_v2_dedupe.py | 1 | 3901 | """This module is used to identify and filter out broadcast v2 broadcasts, which leads to significant
performance increases.
"""
import time
import struct
import collections
from typing import Dict
from iotile.cloud.utilities import device_id_to_slug
def packet_is_broadcast_v2(packet: bytearray) -> bool:
"""Simple/efficient check for whether a given packet from the bled112 is an IOTile Broadcast v2 packet."""
#Broadcast packets consist of 32 bytes for data, 10 for BLE packet header and 4 for bled112 bgapi header
if len(packet) != 46:
return False
#This identifies the bgapi packet as an event
if not (packet[0] == 0x80 and packet[2] == 6 and packet[3] == 0):
return False
#This identifies the event as a broadcast v2 packet
if not (packet[18] == 0x1b and packet[19] == 0x16 and packet[20] == 0xdd and packet[21] == 0xfd):
return False
return True
class BroadcastV2DeduperCollection:
"""Main interface into the Broadcast v2 deduplication code.
This contains a dictionary, keyed on the broadcast sender's encoded UUID, and with the values being
a small class that stores the last received packet from that UUID and the last time the packet
    was forwarded. That class (BroadcastV2Deduper) reports whether the packet is new and should be allowed through.
Args:
pass_packets_every(float, seconds): For each encoded_uuid address, at least one packet will be allowed through
every "pass_packets_every" seconds
"""
MAX_DEDUPERS = 500
def __init__(self, pass_packets_every: float = 5):
self._pass_packets_every = pass_packets_every
self.dedupers = collections.OrderedDict() #type: collections.OrderedDict[bytes, BroadcastV2Deduper]
def allow_packet(self, packet: bytearray) -> bool:
"""Run a packet through the broadcast_v2 deduper.
Returns False if the packet should be dropped
"""
if not packet_is_broadcast_v2(packet):
return True
encoded_uuid = bytes(packet[22:26])
stream = bytes(packet[36:38])
uuid_and_stream = (encoded_uuid, stream)
data = bytes(packet[22:])
deduper = self.dedupers.get(uuid_and_stream)
if deduper is None:
deduper = BroadcastV2Deduper(uuid_and_stream, self._pass_packets_every)
if len(self.dedupers) == self.MAX_DEDUPERS:
self.evict_oldest_deduper()
self.dedupers[uuid_and_stream] = deduper
return deduper.allow_packet(data)
def evict_oldest_deduper(self):
"""Find and remove the oldest deduper
This function will likely be called rarely, if at all
"""
self.dedupers.popitem(last=False)
class BroadcastV2Deduper():
"""Individual deduplicator for an specific UUID and stream."""
def __init__(self, uuid_and_stream: tuple, pass_packets_every: float = 5):
self.encoded_uuid = uuid_and_stream[0]
self._pass_packets_every = pass_packets_every
self.last_allowed_packet = 0 #type: float
self.last_data = bytes()
self._slug = ""
def get_slug(self):
"""For debugging, unpack the UUID into a slug so it can be printed. Only do this if needed though."""
if self._slug:
return self._slug
uuid = struct.unpack("<L", self.encoded_uuid)
self._slug = device_id_to_slug("%04X" % uuid)
return self._slug
    def allow_packet(self, broadcast_data: bytes) -> bool:
"""Check if the packet is allowed. If so, save it and return True. Otherwise return False."""
if (time.monotonic() > self.last_allowed_packet + self._pass_packets_every or
self.last_data != broadcast_data):
self.last_data = broadcast_data
self.last_allowed_packet = time.monotonic()
return True
return False
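# Added usage sketch: a minimal, self-contained demonstration of the deduper. The
# packet built here is synthetic and only fills in the fields that
# packet_is_broadcast_v2() checks; real packets come from the bled112 dongle.
if __name__ == '__main__':
    _demo_packet = bytearray(46)
    _demo_packet[0] = 0x80
    _demo_packet[2] = 6
    _demo_packet[18:22] = b'\x1b\x16\xdd\xfd'
    _dedupers = BroadcastV2DeduperCollection(pass_packets_every=5)
    print(_dedupers.allow_packet(_demo_packet))   # True: first packet from this UUID/stream
    print(_dedupers.allow_packet(_demo_packet))   # False: identical packet within the window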
| gpl-3.0 | -8,435,621,857,235,197,000 | 37.623762 | 118 | 0.655473 | false |
topseer/django | dJangoAdmin/dJangoAdmin/urls.py | 1 | 1593 | """
dJangoAdmin URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
# Use include() to add URLs from the catalog application
from django.conf.urls import include
urlpatterns += [
url(r'^catalog/', include('catalog.urls')),
]
urlpatterns += [
url(r'^polls/', include('polls.urls')),
]
#Add Django site authentication urls (for login, logout, password management)
urlpatterns += [
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^catalog/accounts/', include('django.contrib.auth.urls')),
url(r'^catalog/dashboard/accounts/', include('django.contrib.auth.urls')),
]
# Use static() to add url mapping to serve static files during development (only)
from django.conf import settings
from django.conf.urls.static import static
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) | mit | -268,994,568,428,312,130 | 31.229167 | 81 | 0.689893 | false |
nacl-webkit/native_client | tools/toolchain_tester/toolchain_config.py | 1 | 11986 | # Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Config file for various nacl compilation scenarios
#
import os
import sys
TOOLCHAIN_CONFIGS = {}
class ToolchainConfig(object):
def __init__(self, desc, commands, tools_needed, is_flaky=False,
attributes=[], **extra):
    self._desc = desc
self._commands = commands
self._tools_needed = tools_needed
self._extra = extra
self._flaky = is_flaky
self._attributes = attributes
def Append(self, tag, value):
assert tag in self._extra
self._extra[tag] = self._extra[tag] + ' ' + value + ' '
def SanityCheck(self):
for t in self._tools_needed:
if not os.access(t, os.R_OK | os.X_OK):
print "ERROR: missing tool ", t
sys.exit(-1)
def GetDescription(self):
return self._desc
def GetCommands(self, extra):
for tag, val in self._commands:
d = {}
d.update(self._extra)
d.update(extra)
yield tag, val % d
def GetPhases(self):
return [a for (a, _) in self._commands]
def IsFlaky(self):
return self._flaky
def GetAttributes(self):
return set(self._attributes)
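# Added usage sketch: how the '%(tag)s' templates in a config's commands are
# expanded by GetCommands(). 'src' and 'tmp' are the per-test values supplied by
# the test driver in tools/toolchain_tester/; 'run_cmd' is a hypothetical runner.
#
#   cfg = TOOLCHAIN_CONFIGS['local_gcc_x8632_O0']
#   cfg.SanityCheck()
#   for phase, cmd in cfg.GetCommands({'src': 'test.c', 'tmp': '/tmp/test'}):
#     run_cmd(phase, cmd)  # e.g. 'compile', '/usr/bin/gcc test.c -O0 -m32 ... -o /tmp/test.exe'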
######################################################################
#
######################################################################
LOCAL_GCC = '/usr/bin/gcc'
EMU_SCRIPT = 'toolchain/linux_arm-trusted/run_under_qemu_arm'
TEMPLATE_DIGITS = 'X' * 16
BOOTSTRAP_ARGS = '--r_debug=0x%s --reserved_at_zero=0x%s' % (TEMPLATE_DIGITS,
TEMPLATE_DIGITS)
BOOTSTRAP_ARM = 'scons-out/opt-linux-arm/staging/nacl_helper_bootstrap'
SEL_LDR_ARM = 'scons-out/opt-linux-arm/staging/sel_ldr'
IRT_ARM = 'scons-out/nacl_irt-arm/obj/src/untrusted/irt/irt_core.nexe'
RUN_SEL_LDR_ARM = BOOTSTRAP_ARM + ' ' + SEL_LDR_ARM + ' ' + BOOTSTRAP_ARGS
BOOTSTRAP_X32 = 'scons-out/opt-linux-x86-32/staging/nacl_helper_bootstrap'
SEL_LDR_X32 = 'scons-out/opt-linux-x86-32/staging/sel_ldr'
IRT_X32 = 'scons-out/nacl_irt-x86-32/obj/src/untrusted/irt/irt_core.nexe'
RUN_SEL_LDR_X32 = BOOTSTRAP_X32 + ' ' + SEL_LDR_X32 + ' ' + BOOTSTRAP_ARGS
BOOTSTRAP_X64 = 'scons-out/opt-linux-x86-64/staging/nacl_helper_bootstrap'
SEL_LDR_X64 = 'scons-out/opt-linux-x86-64/staging/sel_ldr'
IRT_X64 = 'scons-out/nacl_irt-x86-64/obj/src/untrusted/irt/irt_core.nexe'
RUN_SEL_LDR_X64 = BOOTSTRAP_X64 + ' ' + SEL_LDR_X64 + ' ' + BOOTSTRAP_ARGS
NACL_GCC_X32 = 'toolchain/linux_x86_newlib/bin/i686-nacl-gcc'
NACL_GCC_X64 = 'toolchain/linux_x86_newlib/bin/x86_64-nacl-gcc'
GLOBAL_CFLAGS = ' '.join(['-DSTACK_SIZE=0x40000',
'-D__SIZEOF_INT__=4',
'-D__SIZEOF_LONG__=4',
'"-D__INT_LEAST8_TYPE__=signed char"',
'"-D__UINT_LEAST32_TYPE__=unsigned int"',
'-D_XOPEN_SOURCE=600',
'-DNO_TRAMPOLINES',
'-DNO_LABEL_VALUES',])
CLANG_CFLAGS = ' '.join(['-fwrapv',
'-fdiagnostics-show-category=name'])
######################################################################
# LOCAL GCC
######################################################################
COMMANDS_local_gcc = [
('compile',
'%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe -lm -lstdc++',
),
('run',
'%(tmp)s.exe',
),
]
TOOLCHAIN_CONFIGS['local_gcc_x8632_O0'] = ToolchainConfig(
desc='local gcc [x86-32]',
attributes=['x86-32', 'O0'],
commands=COMMANDS_local_gcc,
tools_needed=[LOCAL_GCC],
CC = LOCAL_GCC,
CFLAGS = '-O0 -m32 -static ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['local_gcc_x8632_O3'] = ToolchainConfig(
desc='local gcc [x86-32]',
attributes=['x86-32', 'O3'],
commands=COMMANDS_local_gcc,
tools_needed=[LOCAL_GCC],
CC = LOCAL_GCC,
CFLAGS = '-O3 -m32 -static ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['local_gcc_x8664_O0'] = ToolchainConfig(
desc='local gcc [x86-64]',
attributes=['x86-64', 'O0'],
commands=COMMANDS_local_gcc,
tools_needed=[LOCAL_GCC],
CC = LOCAL_GCC,
CFLAGS = '-O0 -m64 -static ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['local_gcc_x8664_O3'] = ToolchainConfig(
attributes=['x86-64', 'O3'],
desc='local gcc [x86-64]',
commands=COMMANDS_local_gcc,
tools_needed=[LOCAL_GCC],
CC = LOCAL_GCC,
CFLAGS = '-O3 -m64 -static ' + GLOBAL_CFLAGS)
######################################################################
# CS ARM
######################################################################
# NOTE: you may need this if you see mmap: Permission denied
# "echo 0 > /proc/sys/vm/mmap_min_addr"
GCC_CS_ARM = ('toolchain/linux_arm-trusted/arm-2009q3/' +
'bin/arm-none-linux-gnueabi-gcc')
COMMANDS_gcc_cs_arm = [
('compile',
'%(CC)s %(src)s %(CFLAGS)s -Wl,-Ttext-segment=20000 -o %(tmp)s.exe',
),
('emu',
'%(EMU_SCRIPT)s %(tmp)s.exe',
)
]
TOOLCHAIN_CONFIGS['gcc_cs_arm_O0'] = ToolchainConfig(
desc='codesourcery cross gcc [arm]',
attributes=['arm', 'O0'],
commands=COMMANDS_gcc_cs_arm,
tools_needed=[GCC_CS_ARM, EMU_SCRIPT ],
CC = GCC_CS_ARM,
EMU_SCRIPT = EMU_SCRIPT,
CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['gcc_cs_arm_O3'] = ToolchainConfig(
desc='codesourcery cross gcc [arm]',
attributes=['arm', 'O3'],
commands=COMMANDS_gcc_cs_arm,
tools_needed=[GCC_CS_ARM, EMU_SCRIPT ],
CC = GCC_CS_ARM,
EMU_SCRIPT = EMU_SCRIPT,
CFLAGS = '-O3 -static ' + GLOBAL_CFLAGS)
######################################################################
# # NACL + SEL_LDR [X86]
######################################################################
COMMANDS_nacl_gcc = [
('compile',
'%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe -lm -lstdc++',
),
('sel_ldr',
'%(SEL_LDR)s -B %(IRT)s %(tmp)s.exe',
)
]
TOOLCHAIN_CONFIGS['nacl_gcc_x8632_O0'] = ToolchainConfig(
desc='nacl gcc [x86-32]',
attributes=['x86-32', 'O0'],
commands=COMMANDS_nacl_gcc,
tools_needed=[NACL_GCC_X32, BOOTSTRAP_X32, SEL_LDR_X32],
CC = NACL_GCC_X32,
SEL_LDR = RUN_SEL_LDR_X32,
IRT = IRT_X32,
CFLAGS = '-O0 -static -Bscons-out/nacl-x86-32/lib/ ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['nacl_gcc_x8632_O3'] = ToolchainConfig(
desc='nacl gcc with optimizations [x86-32]',
attributes=['x86-32', 'O3'],
commands=COMMANDS_nacl_gcc,
tools_needed=[NACL_GCC_X32, BOOTSTRAP_X32, SEL_LDR_X32],
CC = NACL_GCC_X32,
SEL_LDR = RUN_SEL_LDR_X32,
IRT = IRT_X32,
CFLAGS = '-O3 -static -Bscons-out/nacl-x86-32/lib/ ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['nacl_gcc_x8664_O0'] = ToolchainConfig(
desc='nacl gcc [x86-64]',
attributes=['x86-64', 'O0'],
commands=COMMANDS_nacl_gcc,
tools_needed=[NACL_GCC_X64, BOOTSTRAP_X64, SEL_LDR_X64],
CC = NACL_GCC_X64,
SEL_LDR = RUN_SEL_LDR_X64,
IRT = IRT_X64,
CFLAGS = '-O0 -static -Bscons-out/nacl-x86-64/lib/ ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['nacl_gcc_x8664_O3'] = ToolchainConfig(
desc='nacl gcc with optimizations [x86-64]',
attributes=['x86-32', 'O3'],
commands=COMMANDS_nacl_gcc,
tools_needed=[NACL_GCC_X64, BOOTSTRAP_X64, SEL_LDR_X64],
CC = NACL_GCC_X64,
SEL_LDR = RUN_SEL_LDR_X64,
IRT = IRT_X64,
CFLAGS = '-O3 -static -Bscons-out/nacl-x86-64/lib/ ' + GLOBAL_CFLAGS)
######################################################################
# PNACL + SEL_LDR [ARM]
######################################################################
# Locate the pnacl toolchain. Path can be overridden externally.
PNACL_TOOLCHAIN_LABEL = ''
if not 'PNACL_TOOLCHAIN_LABEL' in os.environ:
env_map = { 'linux2': 'linux', 'darwin': 'mac' }
PNACL_TOOLCHAIN_LABEL = 'pnacl_' + env_map[sys.platform] + '_x86'
else:
PNACL_TOOLCHAIN_LABEL = os.environ['PNACL_TOOLCHAIN_LABEL']
PNACL_ROOT = os.path.join('toolchain', PNACL_TOOLCHAIN_LABEL, 'newlib')
PNACL_FRONTEND = PNACL_ROOT + '/bin/pnacl-clang'
# NOTE: Our driver supports going from .c to .nexe in one go
# but it maybe useful to inspect the bitcode file so we
# split the compilation into two steps.
PNACL_LD = PNACL_ROOT + '/bin/pnacl-translate'
COMMANDS_llvm_pnacl_arm = [
('compile-pexe',
'%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.pexe -lm -lstdc++',
),
('translate-arm',
'%(LD)s %(tmp)s.pexe -o %(tmp)s.nexe',
),
('qemu-sel_ldr',
'%(EMU)s %(SEL_LDR)s -B %(IRT)s -Q %(tmp)s.nexe',
)
]
TOOLCHAIN_CONFIGS['llvm_pnacl_arm_O0'] = ToolchainConfig(
desc='pnacl llvm [arm]',
attributes=['arm', 'O0'],
commands=COMMANDS_llvm_pnacl_arm,
tools_needed=[PNACL_FRONTEND, PNACL_LD, EMU_SCRIPT, BOOTSTRAP_ARM,
SEL_LDR_ARM],
is_flaky = True,
CC = PNACL_FRONTEND,
LD = PNACL_LD + ' -arch arm',
EMU = EMU_SCRIPT,
SEL_LDR = RUN_SEL_LDR_ARM,
IRT = IRT_ARM,
CFLAGS = '-O0 -static ' + CLANG_CFLAGS + ' ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['llvm_pnacl_arm_O3'] = ToolchainConfig(
desc='pnacl llvm with optimizations [arm]',
attributes=['arm', 'O3'],
commands=COMMANDS_llvm_pnacl_arm,
tools_needed=[PNACL_FRONTEND, PNACL_LD, EMU_SCRIPT, BOOTSTRAP_ARM,
SEL_LDR_ARM],
is_flaky = True,
CC = PNACL_FRONTEND,
LD = PNACL_LD + ' -arch arm',
EMU = EMU_SCRIPT,
SEL_LDR = RUN_SEL_LDR_ARM,
IRT = IRT_ARM,
CFLAGS = '-O3 -D__OPTIMIZE__ -static ' + CLANG_CFLAGS + ' '
+ GLOBAL_CFLAGS)
######################################################################
# PNACL + SEL_LDR [X8632]
######################################################################
# NOTE: this is used for both x86 flavors
COMMANDS_llvm_pnacl_x86_O0 = [
('compile-pexe',
'%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.pexe -lm -lstdc++',
),
('translate-x86',
'%(LD)s %(tmp)s.pexe -o %(tmp)s.nexe ',
),
('sel_ldr',
'%(SEL_LDR)s -B %(IRT)s %(tmp)s.nexe',
)
]
TOOLCHAIN_CONFIGS['llvm_pnacl_x8632_O0'] = ToolchainConfig(
desc='pnacl llvm [x8632]',
attributes=['x86-32', 'O0'],
commands=COMMANDS_llvm_pnacl_x86_O0,
tools_needed=[PNACL_FRONTEND, PNACL_LD, BOOTSTRAP_X32, SEL_LDR_X32],
CC = PNACL_FRONTEND,
LD = PNACL_LD + ' -arch x86-32',
SEL_LDR = RUN_SEL_LDR_X32,
IRT = IRT_X32,
CFLAGS = '-O0 -static ' + CLANG_CFLAGS + ' ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['llvm_pnacl_x8632_O3'] = ToolchainConfig(
desc='pnacl llvm [x8632]',
attributes=['x86-32', 'O3'],
commands=COMMANDS_llvm_pnacl_x86_O0,
tools_needed=[PNACL_FRONTEND, PNACL_LD, BOOTSTRAP_X32, SEL_LDR_X32],
CC = PNACL_FRONTEND,
LD = PNACL_LD + ' -arch x86-32',
SEL_LDR = RUN_SEL_LDR_X32,
IRT = IRT_X32,
CFLAGS = '-O3 -D__OPTIMIZE__ -static ' + CLANG_CFLAGS + ' '
+ GLOBAL_CFLAGS)
######################################################################
# PNACL + SEL_LDR [X8664]
######################################################################
TOOLCHAIN_CONFIGS['llvm_pnacl_x8664_O0'] = ToolchainConfig(
desc='pnacl llvm [x8664]',
attributes=['x86-64', 'O0'],
commands=COMMANDS_llvm_pnacl_x86_O0,
tools_needed=[PNACL_FRONTEND, PNACL_LD, BOOTSTRAP_X64, SEL_LDR_X64],
CC = PNACL_FRONTEND,
LD = PNACL_LD + ' -arch x86-64',
SEL_LDR = RUN_SEL_LDR_X64,
IRT = IRT_X64,
CFLAGS = '-O0 -static ' + CLANG_CFLAGS + ' ' + GLOBAL_CFLAGS)
TOOLCHAIN_CONFIGS['llvm_pnacl_x8664_O3'] = ToolchainConfig(
desc='pnacl llvm [x8664]',
attributes=['x86-64', 'O3'],
commands=COMMANDS_llvm_pnacl_x86_O0,
tools_needed=[PNACL_FRONTEND, PNACL_LD, BOOTSTRAP_X64, SEL_LDR_X64],
CC = PNACL_FRONTEND,
LD = PNACL_LD + ' -arch x86-64',
SEL_LDR = RUN_SEL_LDR_X64,
IRT = IRT_X64,
CFLAGS = '-O3 -D__OPTIMIZE__ -static ' + CLANG_CFLAGS + ' '
+ GLOBAL_CFLAGS)
| bsd-3-clause | 3,555,109,398,028,804,000 | 32.480447 | 77 | 0.555398 | false |
claudep/django-calendarium | calendarium/tests/views_tests.py | 1 | 10165 | """Tests for the views of the ``calendarium`` app."""
# ! Never use django.utils.timezone.now directly; always import calendarium.utils.now instead.
# Inaccuracy at the microsecond level can negatively influence your tests.
# from django.utils.timezone import now
from django.utils.timezone import timedelta
from django.test import TestCase
from django_libs.tests.factories import UserFactory
from django_libs.tests.mixins import ViewTestMixin
from calendarium.models import Event
from .factories import (
EventFactory,
EventCategoryFactory,
GroupFactory,
RuleFactory,
)
from calendarium.utils import now
class CalendariumRedirectViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``CalendariumRedirectView`` view."""
longMessage = True
def get_view_name(self):
return 'calendar_current_month'
def test_view(self):
resp = self.client.get(self.get_url())
self.assertEqual(resp.status_code, 301)
class MonthViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``MonthView`` view class."""
longMessage = True
def get_view_name(self):
return 'calendar_month'
def get_view_kwargs(self):
return {'year': self.year, 'month': self.month}
def setUp(self):
self.year = now().year
self.month = now().month
def test_view(self):
"""Test for the ``MonthView`` view class."""
# regular call
resp = self.is_callable()
self.assertEqual(
resp.template_name[0], 'calendarium/calendar_month.html', msg=(
'Returned the wrong template.'))
self.is_callable(method='POST', data={'next': True})
self.is_callable(method='POST', data={'previous': True})
self.is_callable(method='POST', data={'today': True})
# AJAX call
resp = self.client.get(
self.get_url(), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(
resp.template_name[0], 'calendarium/partials/calendar_month.html',
msg=('Returned the wrong template for AJAX request.'))
# called with a invalid category pk
resp = self.client.get('{0}?category=abc'.format(self.get_url()))
self.assertEqual(resp.status_code, 200)
# called with a non-existant category pk
resp = self.client.get('{0}?category=999'.format(self.get_url()))
self.assertEqual(resp.status_code, 200)
# called with a category pk
category = EventCategoryFactory()
resp = self.client.get('{0}?category={1}'.format(self.get_url(),
category.id))
self.assertEqual(resp.status_code, 200)
# called with wrong values
self.is_not_callable(kwargs={'year': 2000, 'month': 15})
class WeekViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``WeekView`` view class."""
longMessage = True
def get_view_name(self):
return 'calendar_week'
def get_view_kwargs(self):
return {'year': self.year, 'week': self.week}
def setUp(self):
self.year = now().year
# current week number
self.week = now().date().isocalendar()[1]
def test_view(self):
"""Tests for the ``WeekView`` view class."""
resp = self.is_callable()
self.assertEqual(
resp.template_name[0], 'calendarium/calendar_week.html', msg=(
'Returned the wrong template.'))
self.is_callable(method='POST', data={'next': True})
self.is_callable(method='POST', data={'previous': True})
self.is_callable(method='POST', data={'today': True})
resp = self.client.get(
self.get_url(), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(
resp.template_name[0], 'calendarium/partials/calendar_week.html',
msg=('Returned the wrong template for AJAX request.'))
self.is_not_callable(kwargs={'year': self.year, 'week': '60'})
class DayViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``DayView`` view class."""
longMessage = True
def get_view_name(self):
return 'calendar_day'
def get_view_kwargs(self):
return {'year': self.year, 'month': self.month, 'day': self.day}
def setUp(self):
self.year = 2001
self.month = 2
self.day = 15
def test_view(self):
"""Tests for the ``DayView`` view class."""
resp = self.is_callable()
self.assertEqual(
resp.template_name[0], 'calendarium/calendar_day.html', msg=(
'Returned the wrong template.'))
self.is_callable(method='POST', data={'next': True})
self.is_callable(method='POST', data={'previous': True})
self.is_callable(method='POST', data={'today': True})
resp = self.client.get(
self.get_url(), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(
resp.template_name[0], 'calendarium/partials/calendar_day.html',
msg=('Returned the wrong template for AJAX request.'))
self.is_not_callable(kwargs={'year': self.year, 'month': '14',
'day': self.day})
class EventUpdateViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventUpdateView`` view class."""
longMessage = True
def get_view_name(self):
return 'calendar_event_update'
def get_view_kwargs(self):
return {'pk': self.event.pk}
def setUp(self):
self.event = EventFactory()
self.user = UserFactory()
self.group = GroupFactory()
self.user.groups.add(self.group)
def test_view(self):
self.should_be_callable_when_authenticated(self.user)
class EventCreateViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventCreateView`` view class."""
longMessage = True
def get_view_name(self):
return 'calendar_event_create'
def setUp(self):
self.user = UserFactory()
self.group = GroupFactory()
self.user.groups.add(self.group)
def test_view(self):
self.should_be_callable_when_authenticated(self.user)
self.is_callable(data={'delete': True})
self.assertEqual(Event.objects.all().count(), 0)
class EventDetailViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``EventDetailView`` view class."""
longMessage = True
def get_view_name(self):
return 'calendar_event_detail'
def get_view_kwargs(self):
return {'pk': self.event.pk}
def setUp(self):
self.event = EventFactory()
def test_view(self):
self.is_callable()
class OccurrenceViewTestCaseMixin(object):
"""Mixin to avoid repeating code for the Occurrence views."""
longMessage = True
def get_view_kwargs(self):
return {
'pk': self.event.pk,
'year': self.event.start.date().year,
'month': self.event.start.date().month,
'day': self.event.start.date().day,
}
def setUp(self):
self.rule = RuleFactory(name='daily')
self.start = now() - timedelta(days=1)
self.end = now() + timedelta(days=5)
self.event = EventFactory(
rule=self.rule, end_recurring_period=now() + timedelta(days=2))
def test_view(self):
# regular test with a valid request
self.is_callable()
class OccurrenceDeleteViewTestCase(
OccurrenceViewTestCaseMixin, ViewTestMixin, TestCase):
"""Tests for the ``OccurrenceDeleteView`` view class."""
def get_view_name(self):
return 'calendar_occurrence_delete'
def test_deletion(self):
self.is_callable(method='post')
self.is_callable(kwargs={
'pk': self.event.pk,
'year': self.event.start.date().year,
'month': self.event.start.date().month,
'day': self.event.start.date().day + 1,
}, message=('Should be callable, if date in period.'))
self.is_not_callable(kwargs={
'pk': 5,
'year': self.event.start.date().year,
'month': self.event.start.date().month,
'day': self.event.start.date().day,
}, message=('Wrong event pk.'))
self.is_not_callable(kwargs={
'pk': self.event.pk,
'year': self.event.start.date().year,
'month': '999',
'day': self.event.start.date().day,
}, message=('Wrong dates.'))
new_rule = RuleFactory(name='weekly', frequency='WEEKLY')
new_event = EventFactory(
rule=new_rule,
end_recurring_period=now() + timedelta(days=200),
set__start=-5,
)
test_date = self.event.start.date() - timedelta(days=5)
self.is_not_callable(kwargs={
'pk': new_event.pk,
'year': test_date.year,
'month': test_date.month,
'day': test_date.day,
}, message=('No occurrence available for this day.'))
class OccurrenceDetailViewTestCase(
OccurrenceViewTestCaseMixin, ViewTestMixin, TestCase):
"""Tests for the ``OccurrenceDetailView`` view class."""
def get_view_name(self):
return 'calendar_occurrence_detail'
class OccurrenceUpdateViewTestCase(
OccurrenceViewTestCaseMixin, ViewTestMixin, TestCase):
"""Tests for the ``OccurrenceUpdateView`` view class."""
def get_view_name(self):
return 'calendar_occurrence_update'
class UpcomingEventsAjaxViewTestCase(ViewTestMixin, TestCase):
"""Tests for the ``UpcomingEventsAjaxView`` view class."""
def get_view_name(self):
return 'calendar_upcoming_events'
def test_view(self):
self.should_be_callable_when_anonymous()
def test_view_with_count(self):
url = self.get_url()
url = url + '?count=5'
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_view_with_category(self):
cat = EventCategoryFactory()
url = self.get_url()
url = url + '?category={0}'.format(cat.slug)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
| mit | -8,763,776,690,124,926,000 | 32.110749 | 78 | 0.609444 | false |
runt18/nupic | src/nupic/support/exceptions.py | 1 | 2930 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import traceback
class TimeoutError(Exception):
""" The requested operation timed out """
pass
class NupicJobFailException(Exception):
""" This exception signals that the Nupic job (e.g., Hypersearch, Production,
etc.) should be aborted due to the given error.
"""
def __init__(self, errorCode, msg):
"""
Parameters:
---------------------------------------------------------------------
errorCode: An error code from the support.errorcodes.ErrorCodes
enumeration
msg: Error message string
"""
self.__errorCode = errorCode
self.__msg = msg
    super(NupicJobFailException, self).__init__(errorCode, msg)
return
def getWorkerCompletionMessage(self):
""" Generates a worker completion message that is suitable for the
worker_completion_message field in jobs table
Parameters:
---------------------------------------------------------------------
retval: The worker completion message appropriate for the
"worker_completion_message" field in jobs table
"""
msg = "{0!s}: {1!s}\n{2!s}".format(self.__errorCode, self.__msg, traceback.format_exc())
return msg
@classmethod
def mapCurrentException(cls, e, errorCode, msg):
""" Raises NupicJobFailException by mapping from another exception that
is being handled in the caller's scope and preserves the current exception's
traceback.
Parameters:
---------------------------------------------------------------------
e: The source exception
errorCode: An error code from the support.errorcodes.ErrorCodes
enumeration
msg: Error message string
"""
traceback = sys.exc_info()[2]
assert traceback is not None
newMsg = "{0!s}: {1!r}".format(msg, e)
e = NupicJobFailException(errorCode=errorCode, msg=newMsg)
raise e, None, traceback
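# Added usage sketch (illustrative names only): re-raising a caught exception as a
# NupicJobFailException while preserving the original traceback. 'runJobStep' and
# the specific error code are placeholders; ErrorCodes refers to
# support.errorcodes.ErrorCodes as mentioned in the docstrings above.
#
#   try:
#     runJobStep()
#   except Exception, e:
#     NupicJobFailException.mapCurrentException(
#         e, errorCode=ErrorCodes.someErrorCode, msg="job step failed")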
| agpl-3.0 | -2,779,699,714,728,841,700 | 31.197802 | 92 | 0.601024 | false |
magicicada-bot/magicicada-server | magicicada/filesync/tests/test_admin.py | 1 | 2475 | # Copyright 2008-2015 Canonical
# Copyright 2015-2018 Chicharreros (https://launchpad.net/~chicharreros)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/magicicada-server
"""Tests for the admin features."""
from __future__ import unicode_literals
from magicicada.filesync import admin, services
from magicicada.testing.testcase import BaseTestCase
class AdminServicesTestCase(BaseTestCase):
"""Tests the admin module features."""
def _make_users(self):
"""Create users for tests."""
usernames = ['bob', 'bobby', 'inez', 'juan', 'tim']
for name in usernames:
services.make_storage_user(
username=name, max_storage_bytes=2 ** 30)
def test_StorageUserFinder(self):
"""Test the StorageUserFinder."""
users = admin.StorageUserFinder()
self.assertEqual(users.all(), [])
self.assertEqual(users.count(), 0)
self.assertEqual(users.is_empty(), True)
self._make_users()
# the returning object can be reused
self.assertEqual(len(users.all()), 5)
self.assertEqual(users.count(), 5)
self.assertEqual(users.is_empty(), False)
self.assertEqual(users[4].username, "tim")
users.filter = "BOB"
self.assertEqual(len(users.all()), 2)
self.assertEqual(users[0].username, "bob")
self.assertEqual(users[1].username, "bobby")
users.filter = "juan"
self.assertEqual(len(users.all()), 1)
self.assertTrue(isinstance(users[0], services.DAOStorageUser))
self.assertEqual(users[0].username, "juan")
# test slicing
users.filter = None
subset = users[2:4]
self.assertEqual(len(subset), 2)
self.assertEqual(subset[0].username, "inez")
self.assertEqual(subset[1].username, "juan")
| agpl-3.0 | 2,863,561,809,631,575,000 | 38.919355 | 74 | 0.672727 | false |
monkpit/pyfocas | FanucImplementation/Fwlib32_h.py | 1 | 13167 | # -*- coding: utf-8 -*-
""" Fwlib32_h.py
This file contains ctypes structures to match the data structures
found in the library header Fwlib32.h.
Nearly all classes contain `_pack_ = 4` (PMCData uses `_pack_ = 1`); this comes from Fwlib32.h:
#pragma pack(push,4)
Don't unit test these because it would basically be running tests against
the ctypes module itself and not any of our own code.
Further documentation can be found in the FOCAS documentation.
Look up the documentation of the Equivalent data type.
For example, for documentation on "AlarmStatus", look up "ODBALM".
"""
import ctypes
"""Constants"""
MAX_AXIS = 32
"""int: The maximum number of axes a control will return"""
ALL_AXES = -1
"""int: A constant value to request that a function return all axes at once"""
DATAIO_ALARM_MASK = (0x1 << 2) | (0x1 << 7)
SERVO_ALARM_MASK = 0x1 << 6
MACRO_ALARM_MASK = 0x1 << 8
OVERHEAT_ALARM_MASK = 0x1 << 5
OVERTRAVEL_ALARM_MASK = 0x1 << 4
SPINDLE_ALARM_MASK = 0x1 << 9
"""bit masks to determine alarm status
Take an alarm data word and AND it with the mask.
If the result is nonzero the alarm is active;
if it is zero the alarm is cleared.
For example, see: DriverImplementations.alarmStringBuilder
"""
class AlarmStatus(ctypes.Structure):
"""
Equivalent of ODBALM
"""
_pack_ = 4
_fields_ = [("dummy", ctypes.c_short * 2),
("data", ctypes.c_short), ]
ODBALM = AlarmStatus
class LoadElement(ctypes.Structure):
"""
Equivalent of LOADELM
"""
_pack_ = 4
_fields_ = [("data", ctypes.c_long),
("decimal", ctypes.c_short),
("unit", ctypes.c_short),
("name", ctypes.c_char),
("suffix1", ctypes.c_char),
("suffix2", ctypes.c_char),
("reserve", ctypes.c_char), ]
LOADELM = LoadElement
class ServoLoad(ctypes.Structure):
"""
Equivalent of ODBSVLOAD
"""
_pack_ = 4
_fields_ = [("load", LoadElement)]
ODBSVLOAD = ServoLoad
class SpindleLoad(ctypes.Structure):
"""
Equivalent of ODBSPLOAD
"""
_pack_ = 4
_fields_ = [("load", LoadElement),
("speed", LoadElement), ]
ODBSPLOAD = SpindleLoad
class StatInfo(ctypes.Structure):
_pack_ = 4
_fields_ = [("hdck", ctypes.c_short),
("tmmode", ctypes.c_short),
("auto", ctypes.c_short),
("run", ctypes.c_short),
("motion", ctypes.c_short),
("mstb", ctypes.c_short),
("estop", ctypes.c_short),
("alarm", ctypes.c_short),
("edit", ctypes.c_short), ]
@property
def __dict__(self):
# unreadable
return dict((f, getattr(self, f)) for f, _ in self._fields_)
class ModalAux(ctypes.Structure):
_pack_ = 4
_fields_ = [("aux_data", ctypes.c_long),
("flag1", ctypes.c_char),
("flag2", ctypes.c_char), ]
class ModalAuxUnion(ctypes.Union):
_pack_ = 4
_fields_ = [("g_data", ctypes.c_char),
("g_rdata", ctypes.c_char * 35),
("g_1shot", ctypes.c_char * 4),
("aux", ModalAux),
("raux1", ModalAux * 27),
("raux2", ModalAux * MAX_AXIS), ]
class ModalData(ctypes.Structure):
"""
Equivalent of ODBMDL
"""
_pack_ = 4
_fields_ = [("datano", ctypes.c_short),
("type", ctypes.c_short),
("modal", ModalAuxUnion), ]
ODBMDL = ModalData
class ExecutingProgram(ctypes.Structure):
"""
Equivalent of ODBEXEPRG
"""
_pack_ = 4
_fields_ = [("name", ctypes.c_char * 36),
("oNumber", ctypes.c_long), ]
ODBEXEPRG = ExecutingProgram
class AxisName(ctypes.Structure):
"""
Equivalent of ODBAXISNAME
"""
_pack_ = 4
_fields_ = [("name", ctypes.c_char),
("suffix", ctypes.c_char)]
ODBAXISNAME = AxisName
class AxisData(ctypes.Structure):
"""
Equivalent of ODBAXDT
"""
_pack_ = 4
_fields_ = [("axisName", ctypes.c_char * 4),
("position", ctypes.c_long),
("decimalPosition", ctypes.c_short),
("unit", ctypes.c_short),
("flag", ctypes.c_short),
("_reserved", ctypes.c_short), ]
ODBAXDT = AxisData
class AlarmRecord(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("alarmGroup", ctypes.c_short),
("alarmNumber", ctypes.c_short),
("axis", ctypes.c_byte),
("_AlarmRecord_dummy", ctypes.c_byte)]
class MDIRecord(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("keycode", ctypes.c_byte),
("powerFlag", ctypes.c_byte),
("_MDIRecord_dummy", ctypes.c_char * 4), ]
class SignalRecord(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("signalName", ctypes.c_byte),
("oldSignal", ctypes.c_byte),
("newSignal", ctypes.c_byte),
("_SignalRecord_dummy", ctypes.c_byte),
("signalNumber", ctypes.c_short), ]
class DateOrPower(ctypes.Structure):
_pack_ = 4
_fields_ = [("recordType", ctypes.c_short),
("year", ctypes.c_byte),
("month", ctypes.c_byte),
("day", ctypes.c_byte),
("powerFlag", ctypes.c_byte),
("_DateOrPower_dummy", ctypes.c_byte * 2)]
class OperationHistoryDataUnion(ctypes.Union):
"""
Union for operation history data
"""
_pack_ = 4
_fields_ = [("alarm", AlarmRecord),
("mdi", MDIRecord),
("signal", SignalRecord),
("dateOrPower", DateOrPower), ]
class OperationHistory(ctypes.Structure):
"""
Equivalent of ODBHIS
"""
_pack_ = 4
_fields_ = [("startNumber", ctypes.c_ushort),
("_ODBHIS_type", ctypes.c_short),
("endNumber", ctypes.c_ushort),
("data", OperationHistoryDataUnion * 10)]
ODBHIS = OperationHistory
class ProgramDirectory2(ctypes.Structure):
"""
Equivalent of PRGDIR2
"""
_pack_ = 4
_fields_ = [("number", ctypes.c_short),
("length", ctypes.c_long),
("comment", ctypes.c_char * 51),
("_ProgramDirectory2_dummy", ctypes.c_char), ]
PRGDIR2 = ProgramDirectory2
class PanelSignals150(ctypes.Structure):
"""
Equivalent of IODBSGNL with less data
"""
_pack_ = 4
_fields_ = [("_PanelSignals150_dummy", ctypes.c_short), # dummy
("type", ctypes.c_short), # data select flag
("mode", ctypes.c_short), # mode signal
("manualFeedAxis", ctypes.c_short), # Manual handle feed axis selection signal
("manualFeedDistance", ctypes.c_short), # Manual handle feed travel distance selection signal
("rapidOverride", ctypes.c_short), # rapid traverse override signal
("jogOverride", ctypes.c_short), # manual feedrate override signal
("feedOverride", ctypes.c_short), # feedrate override signal
("spindleOverride", ctypes.c_short), # (not used)
("blockDelete", ctypes.c_short), # optional block skip signal
("singleBlock", ctypes.c_short), # single block signal
("machineLock", ctypes.c_short), # machine lock signal
("dryRun", ctypes.c_short), # dry run signal
("memoryProtection", ctypes.c_short), # memory protection signal
("feedHold", ctypes.c_short), # automatic operation halt signal
("manualRapid", ctypes.c_short), # (not used)
("_PanelSignals150_dummy2", ctypes.c_short * 2), ] # dummy
class PanelSignals160(ctypes.Structure):
"""
Equivalent of IODBSGNL
"""
_pack_ = 4
_fields_ = [("_PanelSignals160_dummy", ctypes.c_short), # dummy
("type", ctypes.c_short), # data select flag
("mode", ctypes.c_short), # mode signal
("manualFeedAxis", ctypes.c_short), # Manual handle feed axis selection signal
("manualFeedDistance", ctypes.c_short), # Manual handle feed travel distance selection signal
("rapidOverride", ctypes.c_short), # rapid traverse override signal
("jogOverride", ctypes.c_short), # manual feedrate override signal
("feedOverride", ctypes.c_short), # feedrate override signal
("spindleOverride", ctypes.c_short), # (not used)
("blockDelete", ctypes.c_short), # optional block skip signal
("singleBlock", ctypes.c_short), # single block signal
("machineLock", ctypes.c_short), # machine lock signal
("dryRun", ctypes.c_short), # dry run signal
("memoryProtection", ctypes.c_short), # memory protection signal
("feedHold", ctypes.c_short),] # automatic operation halt signal
IODBSGNL = PanelSignals160
class PMCData(ctypes.Structure):
"""
Actual PMC values that were read
Used to replace anonymous struct in IODBPMC called "u"
"""
_pack_ = 1
_fields_ = [("cdata", ctypes.c_byte * 5),
("idata", ctypes.c_short * 5),
("ldata", ctypes.c_byte * 5), ]
@property
def pmcValue(self):
if self.cdata[0] < 0:
self.cdata[0] = -self.cdata[0] - 1
return self.cdata[0]
class PMC(ctypes.Structure):
"""
A data structure to hold values read from PMC addresses
Equivalent of IODBPMC
"""
_pack_ = 4
_fields_ = [("addressType", ctypes.c_short),
("dataType", ctypes.c_short),
("startAddress", ctypes.c_short),
("endAddress", ctypes.c_short),
("data", PMCData), ]
IODBPMC = PMC
class FAxis(ctypes.Structure):
_pack_ = 4
_fields_ = [("_absolute", ctypes.c_long * MAX_AXIS),
("_machine", ctypes.c_long * MAX_AXIS),
("_relative", ctypes.c_long * MAX_AXIS),
("_distance", ctypes.c_long * MAX_AXIS), ]
@property
def __dict__(self):
# unreadable
return dict((f, [x for x in getattr(self, f)])
for (f, _) in self._fields_)
# return {"absolute": self.absolute,
# "machine": self.machine,
# "relative": self.relative,
# "distance": self.distance}
class OAxis(ctypes.Structure):
_pack_ = 4
_fields_ = [("absolute", ctypes.c_long),
("machine", ctypes.c_long),
("relative", ctypes.c_long),
("distance", ctypes.c_long), ]
@property
def __dict__(self):
# unreadable
return dict((f, getattr(self, f)) for f, _ in self._fields_)
class PositionUnion(ctypes.Union):
"""
Alias for the anonymous union "pos" defined in some fwlib32 structures
"""
_pack_ = 4
_fields_ = [("_faxis", FAxis),
("_oaxis", OAxis), ]
@property
def __dict__(self):
# unreadable
return dict([("faxis", self._faxis.__dict__),
("oaxis", self._oaxis.__dict__)])
class DynamicResult(ctypes.Structure):
"""
Alias for ODBDY2 because what does that even mean
"""
_pack_ = 4
_fields_ = [("_DynamicResult_dummy", ctypes.c_short),
("axis", ctypes.c_short),
("alarm", ctypes.c_long),
("programNumber", ctypes.c_long),
("mainProgramNumber", ctypes.c_long),
("sequenceNumber", ctypes.c_long),
("actualFeed", ctypes.c_long),
("actualSpindleSpeed", ctypes.c_long),
("position", PositionUnion), ]
@property
def __dict__(self):
# unreadable
return dict((f, getattr(self, f)) for f, _ in self._fields_)
ODBDY2 = DynamicResult
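# Illustrative sketch (added for clarity; not part of the original bindings).
# The __dict__ properties defined on FAxis, OAxis, PositionUnion and
# DynamicResult turn the ctypes data into plain Python dictionaries, which is
# convenient for logging or serialisation.  A freshly constructed, zero-filled
# DynamicResult stands in for data that fwlib32 would normally provide.
def _example_dynamic_result_snapshot():
    result = DynamicResult()
    snapshot = result.__dict__  # field name -> value (ints, plus the position union)
    return snapshot['actualFeed']  # -> 0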
class IDBPMMGTI(ctypes.Structure):
"""
Equivalent of IDBPMMGTI in FOCAS documentation
"""
_pack_ = 4
_fields_ = [("top", ctypes.c_long),
("num", ctypes.c_long), ]
class ODBPMMGET(ctypes.Structure):
"""
Equivalent of ODBPMMGET in FOCAS documentation
"""
_pack_ = 4
_fields_ = [("position", ctypes.c_long),
("actualFeed", ctypes.c_long),
("data", ctypes.c_long * 20),
("number", ctypes.c_long * 20),
("axis", ctypes.c_short * 20),
("type", ctypes.c_short * 20),
("alarmAxis", ctypes.c_char * 40),
("alarmNumber", ctypes.c_ushort * 40),
("channel", ctypes.c_long),
("group", ctypes.c_long), ]
class ProgramData(ctypes.Structure):
"""
Equivalent of ODBPRO
"""
_pack_ = 4
_fields_ = [("dummy", ctypes.c_short * 2),
("program", ctypes.c_long),
("mainProgram", ctypes.c_long)]
ODBPRO = ProgramData
| mit | 8,969,923,093,926,644,000 | 28.925 | 111 | 0.540214 | false |
JMSwag/jms-utils | tests/test_compat.py | 1 | 1622 | # --------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014-2016 Digital Sapphire
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# --------------------------------------------------------------------------
import six
from jms_utils.compat import make_compat_str
def test_make_compat_str():
byte_str = b"Give me some bytes"
assert isinstance(make_compat_str(byte_str), six.string_types)
assert isinstance(make_compat_str('Another string'), six.string_types)
assert isinstance(make_compat_str(u'unicode string'), six.string_types)
| mit | -2,722,114,922,590,430,700 | 48.151515 | 79 | 0.694821 | false |
edx/edx-enterprise | tests/test_enterprise/management/test_email_drip_for_missing_dsc_records.py | 1 | 5469 | # -*- coding: utf-8 -*-
"""
Tests for the django management command `email_drip_for_missing_dsc_records`.
"""
import random
from datetime import timedelta
import mock
from pytest import mark
from testfixtures import LogCapture
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from consent.models import DataSharingConsent, ProxyDataSharingConsent
from test_utils.factories import (
EnterpriseCourseEnrollmentFactory,
EnterpriseCustomerFactory,
EnterpriseCustomerUserFactory,
UserFactory,
)
LOGGER_NAME = 'enterprise.management.commands.email_drip_for_missing_dsc_records'
@mark.django_db
class EmailDripForMissingDscRecordsCommandTests(TestCase):
"""
Test command `email_drip_for_missing_dsc_records`.
"""
command = 'email_drip_for_missing_dsc_records'
def create_enrollments(self, num_learners, enrollment_date):
"""
Create test users and enrollments in database
"""
course_ids = [
'course-v1:edX+DemoX+Demo_Course',
'course-v1:edX+Python+1T2019',
'course-v1:edX+React+2T2019',
]
enterprise_customer = EnterpriseCustomerFactory(
name='Starfleet Academy',
enable_data_sharing_consent=True,
enforce_data_sharing_consent='at_enrollment',
)
learners = []
for __ in range(num_learners):
user = UserFactory.create(is_staff=False, is_active=True)
learners.append(user)
learners_data = []
for learner in learners:
course_id = random.choice(course_ids)
learners_data.append(
{
'ENTERPRISE_UUID': enterprise_customer.uuid,
'EMAIL': learner.email,
'USERNAME': learner.username,
'USER_ID': learner.id,
'COURSE_ID': course_id
}
)
enterprise_customer_user = EnterpriseCustomerUserFactory(
user_id=learner.id,
enterprise_customer=enterprise_customer
)
enterprise_course_enrollment = EnterpriseCourseEnrollmentFactory(
enterprise_customer_user=enterprise_customer_user,
course_id=course_id,
)
enterprise_course_enrollment.created = enrollment_date
enterprise_course_enrollment.save()
def setUp(self):
super().setUp()
today = timezone.now().date()
self.create_enrollments(num_learners=3, enrollment_date=today - timedelta(days=1))
self.create_enrollments(num_learners=5, enrollment_date=today - timedelta(days=10))
@mock.patch(
'enterprise.management.commands.email_drip_for_missing_dsc_records.DataSharingConsent.objects.proxied_get'
)
@mock.patch('enterprise.management.commands.email_drip_for_missing_dsc_records.utils.track_event')
@mock.patch('enterprise.management.commands.email_drip_for_missing_dsc_records.Command._get_course_properties')
@mock.patch('enterprise.management.commands.email_drip_for_missing_dsc_records.is_course_accessed')
def test_email_drip_for_missing_dsc_records(
self,
mock_is_course_accessed,
mock_get_course_properties,
mock_event_track,
mock_dsc_proxied_get
):
"""
Test that email drip event is fired for missing DSC records
"""
mock_get_course_properties.return_value = 'test_url', 'test_course'
mock_is_course_accessed.return_value = True
# test when consent is present
with LogCapture(LOGGER_NAME) as log:
mock_dsc_proxied_get.return_value = DataSharingConsent()
call_command(self.command)
self.assertEqual(mock_event_track.call_count, 0)
self.assertIn(
'[Absent DSC Email] Emails sent for [0] enrollments out of [3] enrollments.',
log.records[-1].message
)
# test when consent is missing, with --no-commit param
with LogCapture(LOGGER_NAME) as log:
mock_dsc_proxied_get.return_value = ProxyDataSharingConsent()
call_command(self.command, '--no-commit')
self.assertEqual(mock_event_track.call_count, 0)
self.assertIn(
'[Absent DSC Email] Emails sent for [3] enrollments out of [3] enrollments.',
log.records[-1].message
)
# test when consent is missing, without passing --no-commit param
with LogCapture(LOGGER_NAME) as log:
call_command(self.command)
self.assertEqual(mock_event_track.call_count, 3)
self.assertIn(
'[Absent DSC Email] Emails sent for [3] enrollments out of [3] enrollments.',
log.records[-1].message
)
mock_event_track.reset_mock()
# test with --enrollment-before param
enrollment_before_date = (timezone.now().date() - timedelta(days=5)).isoformat()
with LogCapture(LOGGER_NAME) as log:
call_command(self.command, '--enrollment-before', enrollment_before_date)
self.assertEqual(mock_event_track.call_count, 5)
self.assertIn(
'[Absent DSC Email] Emails sent for [5] enrollments out of [5] enrollments.',
log.records[-1].message
)
| agpl-3.0 | 3,555,484,826,017,474,600 | 38.064286 | 115 | 0.620589 | false |
alvin777/excelsior | DP/backpack.py | 1 | 2615 | #!/bin/python
import logging
import unittest
WEIGHT = 0
VALUE = 1
ITEMS_LIST = 2
def calc_items_to_take(items_list, weight):
weight_list = [] # (weight, value, items_list)
for current_weight in xrange(0, weight+1):
total_weight = 0
total_value = 0
taken_items_list = []
max_total_value = 0
logging.debug("current_weight: %d", current_weight)
for item in items_list:
logging.debug(" item: %s", item)
item_weight = item[0]
item_value = item[1]
if item_weight > current_weight:
logging.debug(" skipping")
continue
weight_list_record = weight_list[current_weight - item_weight]
total_value = item_value + weight_list_record[VALUE]
if total_value > max_total_value:
max_total_value = total_value
total_weight = weight_list_record[WEIGHT] + item_weight
taken_items_list = weight_list_record[ITEMS_LIST] + [item]
logging.debug(" choosing, max_total_value: %d, total_weight: %d, taken_items_list: %s", max_total_value, total_weight, taken_items_list)
weight_list.append((total_weight, max_total_value, taken_items_list))
return weight_list[weight]
def calc_value(items_list, weight):
m = [0 for _ in xrange(weight + 1)]
keep = [[0 for _ in xrange(len(items_list))] for _ in xrange(weight+1)]
print "\n".join(map(str,keep))
for w in xrange(1, weight + 1):
# m[w] = max([m[w-1]] + [item[VALUE] + m[w - item[WEIGHT]] for item in items_list if item[WEIGHT] <= w])
for i in xrange(len(items_list)):
item = items_list[i]
if item[WEIGHT] <= w and item[VALUE] + m[w - item[WEIGHT]] > m[w]:
m[w] = item[VALUE] + m[w - item[WEIGHT]]
				keep[w] = keep[w - item[WEIGHT]][:]  # copy the row; plain assignment would alias it and corrupt earlier entries
keep[w][i] += 1
logging.debug("%s", m)
print "\n".join(map(str,keep))
return m[weight]
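# Illustrative check (added for clarity; not part of the original module).
# As written, both functions above solve the unbounded knapsack problem, i.e.
# every item may be taken any number of times.  For the item list used in the
# tests below, five copies of the (20, 130) item fill the capacity of 100
# exactly, giving the optimal value 5 * 130 = 650.
def _example_unbounded_knapsack():
	items_list = ((10, 60), (20, 130), (30, 120))
	return calc_value(items_list, 100)  # expected to return 650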
class TestBackpack(unittest.TestCase):
# def test_simple(self):
# items_list = ((10, 60),
# (20, 130),
# (30, 120))
# weight_list = calc_items_to_take(items_list, 100)
# print weight_list
def test_simple2(self):
items_list = ((10, 60),
(20, 130),
(30, 120))
value = calc_value(items_list, 100)
print value
logging.basicConfig(format='%(levelname)-7s %(message)s', level=logging.DEBUG)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 1,352,507,499,024,379,400 | 27.736264 | 155 | 0.540727 | false |
Fe-Nik-S/Examples | python/patterns/behavioral/iterator.py | 1 | 1040 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
#
# ---------------------------------------------------------------------
# Copyright (C) 2017-2018 The --- Project
# See LICENSE for details
# ---------------------------------------------------------------------
class Fibonacci(object):
def __init__(self, count_to):
self._count_to = count_to
def __iter__(self):
self._current = 0
self._next = 1
return self
def __next__(self):
result = self._current
if self._current > self._count_to:
raise StopIteration
self._current, self._next = self._next, self._current + self._next
return result
if __name__ == "__main__":
count_to = 100
print("Fibonacci sequence values up to {}:".format(count_to))
	fib_iterator = Fibonacci(count_to)
for _ in fib_iterator:
print(_, end=" ")
### OUTPUT ###
# Fibonacci sequence values up to 100:
# 0 1 1 2 3 5 8 13 21 34 55 89
| mit | -1,968,337,526,883,203,800 | 26.157895 | 74 | 0.443798 | false |
mylokin/schematec | tests/test_converter_integer.py | 1 | 1712 | from __future__ import absolute_import
import pytest
import schematec.converters as converters
import schematec.exc as exc
def test_zero():
assert converters.integer(0) == 0
def test_positive_number():
assert converters.integer(1) == 1
def test_negative_number():
assert converters.integer(-1) == -1
def test_long():
assert converters.integer(1L) == 1
def test_long_type():
assert isinstance(converters.integer(1L), int)
def test_positive_number_string():
assert converters.integer('1') == 1
def test_positive_number_string_type():
assert isinstance(converters.integer('1'), int)
def test_negative_number_string():
assert converters.integer('-1') == -1
def test_positive_number_unicode_string():
assert converters.integer(u'1') == 1
def test_positive_number_unicode_string_type():
assert isinstance(converters.integer(u'1'), int)
def test_negative_number_unicode_string():
assert converters.integer(u'-1') == -1
def test_not_number_string():
with pytest.raises(exc.ConvertationError):
converters.integer('a')
def test_empty_string():
with pytest.raises(exc.ConvertationError):
converters.integer('')
def test_none():
with pytest.raises(exc.ConvertationError):
converters.integer(None)
def test_boolean_true():
assert converters.integer(True) == 1
def test_boolean_true_type():
assert isinstance(converters.integer(True), int)
def test_boolean_false():
assert converters.integer(False) == 0
def test_boolean_false_type():
assert isinstance(converters.integer(False), int)
def test_unknown_type():
with pytest.raises(exc.ConvertationError):
converters.integer(type)
| mit | 3,394,978,595,057,186,300 | 18.906977 | 53 | 0.699766 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/operations/pipelines_operations.py | 1 | 18340 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class PipelinesOperations(object):
"""PipelinesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2017-09-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-09-01-preview"
self.config = config
def list_by_factory(
self, resource_group_name, factory_name, custom_headers=None, raw=False, **operation_config):
"""Lists pipelines.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PipelineResource
:rtype:
~azure.mgmt.datafactory.models.PipelineResourcePaged[~azure.mgmt.datafactory.models.PipelineResource]
:raises:
:class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.PipelineResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PipelineResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, factory_name, pipeline_name, pipeline, if_match=None, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a pipeline.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param pipeline_name: The pipeline name.
:type pipeline_name: str
:param pipeline: Pipeline resource definition.
:type pipeline: ~azure.mgmt.datafactory.models.PipelineResource
:param if_match: ETag of the pipeline entity. Should only be
specified for update, for which it should match existing entity or can
be * for unconditional update.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PipelineResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datafactory.models.PipelineResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(pipeline, 'PipelineResource')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PipelineResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, factory_name, pipeline_name, custom_headers=None, raw=False, **operation_config):
"""Gets a pipeline.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param pipeline_name: The pipeline name.
:type pipeline_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PipelineResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datafactory.models.PipelineResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PipelineResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, factory_name, pipeline_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a pipeline.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param pipeline_name: The pipeline name.
:type pipeline_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def create_run(
self, resource_group_name, factory_name, pipeline_name, parameters=None, custom_headers=None, raw=False, **operation_config):
"""Creates a run of a pipeline.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param pipeline_name: The pipeline name.
:type pipeline_name: str
:param parameters: Parameters of the pipeline run.
:type parameters: dict[str, object]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CreateRunResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datafactory.models.CreateRunResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}/createRun'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if parameters is not None:
body_content = self._serialize.body(parameters, '{object}')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('CreateRunResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
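def _example_pipeline_roundtrip(client, resource_group_name, factory_name,
                                pipeline_name, pipeline_resource):
    """Illustrative usage sketch added for clarity; not generated by AutoRest.

    ``client`` is assumed to be a data factory management client (for
    example ``azure.mgmt.datafactory.DataFactoryManagementClient``) that
    exposes this operations class as ``client.pipelines``; all arguments
    here are placeholders supplied by the caller.
    """
    created = client.pipelines.create_or_update(
        resource_group_name, factory_name, pipeline_name, pipeline_resource)
    run = client.pipelines.create_run(
        resource_group_name, factory_name, pipeline_name)
    return created, run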
| mit | -5,748,974,292,146,722,000 | 48.433962 | 173 | 0.642966 | false |
asfin/electrum | electrum/gui/qt/network_dialog.py | 1 | 20251 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import PyQt5.QtCore as QtCore
from electrum.i18n import _
from electrum import constants
from electrum.util import print_error
from electrum.network import serialize_server, deserialize_server
from .util import *
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog):
def __init__(self, network, config, network_updated_signal_obj):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 20)
self.nlayout = NetworkChoiceLayout(network, config)
self.network_updated_signal_obj = network_updated_signal_obj
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal_obj.network_updated_signal.connect(
self.on_update)
network.register_callback(self.on_network, ['updated', 'interfaces'])
def on_network(self, event, *args):
self.network_updated_signal_obj.network_updated_signal.emit(event, args)
def on_update(self):
self.nlayout.update()
class NodesListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Connected node'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
is_server = not bool(item.data(0, Qt.UserRole))
menu = QMenu()
if is_server:
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
else:
index = item.data(1, Qt.UserRole)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(index))
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, network):
self.clear()
self.addChild = self.addTopLevelItem
chains = network.get_blockchains()
n_chains = len(chains)
for k, items in chains.items():
b = network.blockchains[k]
name = b.get_name()
if n_chains >1:
x = QTreeWidgetItem([name + '@%d'%b.get_forkpoint(), '%d'%b.height()])
x.setData(0, Qt.UserRole, 1)
x.setData(1, Qt.UserRole, b.forkpoint)
else:
x = self
for i in items:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([i.host + star, '%d'%i.tip])
item.setData(0, Qt.UserRole, 0)
item.setData(1, Qt.UserRole, i.server)
x.addChild(item)
if n_chains>1:
self.addTopLevelItem(x)
x.setExpanded(True)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class ServerListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Host'), _('Port')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
menu = QMenu()
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.set_server(server))
menu.exec_(self.viewport().mapToGlobal(position))
def set_server(self, s):
host, port, protocol = deserialize_server(s)
self.parent.server_host.setText(host)
self.parent.server_port.setText(port)
self.parent.set_server()
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, servers, protocol, use_tor):
self.clear()
for _host, d in sorted(servers.items()):
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
x = QTreeWidgetItem([_host, port])
server = serialize_server(_host, port, protocol)
x.setData(1, Qt.UserRole, server)
self.addTopLevelItem(x)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class NetworkChoiceLayout(object):
def __init__(self, network, config, wizard=False):
self.network = network
self.config = config
self.protocol = None
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
server_tab = QWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(server_tab, _('Server'))
tabs.addTab(proxy_tab, _('Proxy'))
# server tab
grid = QGridLayout(server_tab)
grid.setSpacing(8)
self.server_host = QLineEdit()
self.server_host.setFixedWidth(200)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(60)
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.server_host.editingFinished.connect(self.set_server)
self.server_port.editingFinished.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 0, 0, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_host, 1, 1, 1, 2)
grid.addWidget(self.server_port, 1, 3)
label = _('Server peers') if network.is_connected() else _('Default Servers')
grid.addWidget(QLabel(label), 2, 0, 1, 5)
self.servers_list = ServerListWidget(self)
grid.addWidget(self.servers_list, 3, 0, 1, 5)
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5', 'HTTP'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(200)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(60)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = QLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setEchoMode(QLineEdit.Password)
self.proxy_password.setFixedWidth(60)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(QIcon(":icons/tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Electrum servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(7, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.server_label = QLabel('')
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_label, 1, 1, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 2, 0)
grid.addWidget(self.height_label, 2, 1)
grid.addWidget(HelpButton(msg), 2, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 3, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 5, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.fill_in_proxy_settings()
self.update()
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_host.setEnabled(enabled)
self.server_port.setEnabled(enabled)
self.servers_list.setEnabled(enabled)
else:
for w in [self.autoconnect_cb, self.server_host, self.server_port, self.servers_list]:
w.setEnabled(False)
def update(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host.setText(host)
self.server_port.setText(port)
self.autoconnect_cb.setChecked(auto_connect)
interface = self.network.interface
host = interface.host if interface else _('None')
self.server_label.setText(host)
self.set_protocol(protocol)
self.servers = self.network.get_servers()
self.servers_list.update(self.servers, self.protocol, self.tor_cb.isChecked())
self.enable_set_server()
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to {0} nodes.").format(n) if n else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains)>1:
chain = self.network.blockchain()
forkpoint = chain.get_forkpoint()
name = chain.get_name()
msg = _('Chain split detected at block {0}').format(forkpoint) + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(self.network)
def fill_in_proxy_settings(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
def change_protocol(self, use_ssl):
p = 's' if use_ssl else 't'
host = self.server_host.text()
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if p not in pp.keys():
p = list(pp.keys())[0]
port = pp[p]
self.server_host.setText(host)
self.server_port.setText(port)
self.set_protocol(p)
self.set_server()
def follow_branch(self, index):
self.network.follow_chain(index)
self.update()
def follow_server(self, server):
self.network.switch_to_interface(server)
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host, port, protocol = deserialize_server(server)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
self.update()
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if protocol and protocol not in protocol_letters:
protocol = None
if protocol:
port = pp.get(protocol)
if port is None:
protocol = None
if not protocol:
if 's' in pp.keys():
protocol = 's'
port = pp.get(protocol)
else:
protocol = list(pp.keys())[0]
port = pp.get(protocol)
self.server_host.setText(host)
self.server_port.setText(port)
def accept(self):
pass
def set_server(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host = str(self.server_host.text())
port = str(self.server_port.text())
auto_connect = self.autoconnect_cb.isChecked()
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def set_proxy(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def suggest_proxy(self, found_proxy):
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5') \
and self.proxy_host.text() == "127.0.0.1" \
and self.proxy_port.text() == str(found_proxy[1]):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
print_error("[network_dialog] can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
def run(self):
# Probable ports for Tor to listen at
ports = [9050, 9150]
for p in ports:
if TorDetector.is_tor_port(p):
self.found_proxy.emit(("127.0.0.1", p))
return
@staticmethod
def is_tor_port(port):
try:
s = (socket._socketobject if hasattr(socket, "_socketobject") else socket.socket)(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect(("127.0.0.1", port))
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
| mit | 6,197,286,717,328,989,000 | 38.019268 | 146 | 0.611723 | false |
mancoast/CPythonPyc_test | fail/311_test_importhooks.py | 1 | 8115 | import sys
import imp
import os
import unittest
from test import support
test_src = """\
def get_name():
return __name__
def get_file():
return __file__
"""
absimp = "import sub\n"
relimp = "from . import sub\n"
deeprelimp = "from .... import sub\n"
futimp = "from __future__ import absolute_import\n"
reload_src = test_src+"""\
reloaded = True
"""
test_co = compile(test_src, "<???>", "exec")
reload_co = compile(reload_src, "<???>", "exec")
test2_oldabs_co = compile(absimp + test_src, "<???>", "exec")
test2_newabs_co = compile(futimp + absimp + test_src, "<???>", "exec")
test2_newrel_co = compile(relimp + test_src, "<???>", "exec")
test2_deeprel_co = compile(deeprelimp + test_src, "<???>", "exec")
test2_futrel_co = compile(futimp + relimp + test_src, "<???>", "exec")
test_path = "!!!_test_!!!"
class TestImporter:
modules = {
"hooktestmodule": (False, test_co),
"hooktestpackage": (True, test_co),
"hooktestpackage.sub": (True, test_co),
"hooktestpackage.sub.subber": (True, test_co),
"hooktestpackage.oldabs": (False, test2_oldabs_co),
"hooktestpackage.newabs": (False, test2_newabs_co),
"hooktestpackage.newrel": (False, test2_newrel_co),
"hooktestpackage.sub.subber.subest": (True, test2_deeprel_co),
"hooktestpackage.futrel": (False, test2_futrel_co),
"sub": (False, test_co),
"reloadmodule": (False, test_co),
}
def __init__(self, path=test_path):
if path != test_path:
            # if our class is on sys.path_hooks, we must raise
# ImportError for any path item that we can't handle.
raise ImportError
self.path = path
def _get__path__(self):
raise NotImplementedError
def find_module(self, fullname, path=None):
if fullname in self.modules:
return self
else:
return None
def load_module(self, fullname):
ispkg, code = self.modules[fullname]
mod = sys.modules.setdefault(fullname,imp.new_module(fullname))
mod.__file__ = "<%s>" % self.__class__.__name__
mod.__loader__ = self
if ispkg:
mod.__path__ = self._get__path__()
exec(code, mod.__dict__)
return mod
class MetaImporter(TestImporter):
def _get__path__(self):
return []
class PathImporter(TestImporter):
def _get__path__(self):
return [self.path]
class ImportBlocker:
"""Place an ImportBlocker instance on sys.meta_path and you
can be sure the modules you specified can't be imported, even
if it's a builtin."""
def __init__(self, *namestoblock):
self.namestoblock = dict.fromkeys(namestoblock)
def find_module(self, fullname, path=None):
if fullname in self.namestoblock:
return self
return None
def load_module(self, fullname):
raise ImportError("I dare you")
class ImpWrapper:
def __init__(self, path=None):
if path is not None and not os.path.isdir(path):
raise ImportError
self.path = path
def find_module(self, fullname, path=None):
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [self.path]
try:
file, filename, stuff = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(file, filename, stuff)
class ImpLoader:
def __init__(self, file, filename, stuff):
self.file = file
self.filename = filename
self.stuff = stuff
def load_module(self, fullname):
mod = imp.load_module(fullname, self.file, self.filename, self.stuff)
if self.file:
self.file.close()
mod.__loader__ = self # for introspection
return mod
class ImportHooksBaseTestCase(unittest.TestCase):
def setUp(self):
self.path = sys.path[:]
self.meta_path = sys.meta_path[:]
self.path_hooks = sys.path_hooks[:]
sys.path_importer_cache.clear()
self.modules_before = sys.modules.copy()
def tearDown(self):
sys.path[:] = self.path
sys.meta_path[:] = self.meta_path
sys.path_hooks[:] = self.path_hooks
sys.path_importer_cache.clear()
sys.modules.clear()
sys.modules.update(self.modules_before)
class ImportHooksTestCase(ImportHooksBaseTestCase):
def doTestImports(self, importer=None):
import hooktestmodule
import hooktestpackage
import hooktestpackage.sub
import hooktestpackage.sub.subber
self.assertEqual(hooktestmodule.get_name(),
"hooktestmodule")
self.assertEqual(hooktestpackage.get_name(),
"hooktestpackage")
self.assertEqual(hooktestpackage.sub.get_name(),
"hooktestpackage.sub")
self.assertEqual(hooktestpackage.sub.subber.get_name(),
"hooktestpackage.sub.subber")
if importer:
self.assertEqual(hooktestmodule.__loader__, importer)
self.assertEqual(hooktestpackage.__loader__, importer)
self.assertEqual(hooktestpackage.sub.__loader__, importer)
self.assertEqual(hooktestpackage.sub.subber.__loader__, importer)
TestImporter.modules['reloadmodule'] = (False, test_co)
import reloadmodule
self.assertFalse(hasattr(reloadmodule,'reloaded'))
import hooktestpackage.newrel
self.assertEqual(hooktestpackage.newrel.get_name(),
"hooktestpackage.newrel")
self.assertEqual(hooktestpackage.newrel.sub,
hooktestpackage.sub)
import hooktestpackage.sub.subber.subest as subest
self.assertEqual(subest.get_name(),
"hooktestpackage.sub.subber.subest")
self.assertEqual(subest.sub,
hooktestpackage.sub)
import hooktestpackage.futrel
self.assertEqual(hooktestpackage.futrel.get_name(),
"hooktestpackage.futrel")
self.assertEqual(hooktestpackage.futrel.sub,
hooktestpackage.sub)
import sub
self.assertEqual(sub.get_name(), "sub")
import hooktestpackage.oldabs
self.assertEqual(hooktestpackage.oldabs.get_name(),
"hooktestpackage.oldabs")
self.assertEqual(hooktestpackage.oldabs.sub, sub)
import hooktestpackage.newabs
self.assertEqual(hooktestpackage.newabs.get_name(),
"hooktestpackage.newabs")
self.assertEqual(hooktestpackage.newabs.sub, sub)
def testMetaPath(self):
i = MetaImporter()
sys.meta_path.append(i)
self.doTestImports(i)
def testPathHook(self):
sys.path_hooks.append(PathImporter)
sys.path.append(test_path)
self.doTestImports()
def testBlocker(self):
mname = "exceptions" # an arbitrary harmless builtin module
if mname in sys.modules:
del sys.modules[mname]
sys.meta_path.append(ImportBlocker(mname))
try:
__import__(mname)
except ImportError:
pass
else:
self.fail("'%s' was not supposed to be importable" % mname)
def testImpWrapper(self):
i = ImpWrapper()
sys.meta_path.append(i)
sys.path_hooks.append(ImpWrapper)
mnames = ("colorsys", "urllib.parse", "distutils.core")
for mname in mnames:
parent = mname.split(".")[0]
for n in list(sys.modules):
if n.startswith(parent):
del sys.modules[n]
for mname in mnames:
m = __import__(mname, globals(), locals(), ["__dummy__"])
m.__loader__ # to make sure we actually handled the import
def test_main():
support.run_unittest(ImportHooksTestCase)
if __name__ == "__main__":
test_main()
| gpl-3.0 | -496,799,814,455,263,700 | 30.948819 | 77 | 0.593715 | false |
phpnick/RegPy | tm.py | 1 | 1210 | """
FoSAPy - TM module
Author: Niklas Rieken
"""
import time
class TM():
""" M = (Q, Sigma, Gamma, delta, q_0, q_f, B) """
Q = []
Sigma = []
Gamma = []
delta = {}
q_0 = None
q_f = None
B = None
def __init__(self, Q, Sigma, Gamma, delta, q_0, q_f, B='B'):
""" Constructor """
self.Q = Q
self.Sigma = Sigma
self.Gamma = Gamma
self.delta = delta
self.q_0 = q_0
self.q_f = q_f
self.B = B
def __repr__(self):
""" To string method """
return "M = (\n\tQ = {0},\n\tSigma = {1},\n\tGamma = {2},\n\tdelta = {3},\n\tq_0 = {4},\n\tq_f = {5},\n\tB = {6}\n)".format(self.Q, self.Sigma, self.Gamma, self.delta, self.q_0, self.q_f, self.B)
def simulate(self, w):
""" Runs w on M """
q = self.q_0
u = ''
v = w
print("{0} {1} {2}".format(u, q, v))
time.sleep(2)
while q != self.q_f:
			if len(v) == 0:
				v = self.B
			# look up the transition once, for the symbol currently under the
			# head, before the tape cell is overwritten
			p, written, direction = self.delta[q, v[0]]
			v = written + v[1:]
			if direction == 'L':
				if len(u) == 0:
					u = self.B
				v = u[-1] + v
				u = u[:-1]
			elif direction == 'R':
				if len(v) == 0:
					v = self.B
				u = u + v[0]
				v = v[1:]
			else:
				pass
q = p
print("{0} {1} {2}".format(u, q, v))
time.sleep(2)
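# Illustrative example (added for clarity; not part of the original module).
# It builds a tiny machine that walks right over its input and halts on the
# first blank; a direction other than 'L'/'R' leaves the head in place.  The
# states, alphabet and input below are made up for the demo, and simulate()
# sleeps two seconds per step.
def _example_run():
	delta = {
		('q0', '0'): ('q0', '0', 'R'),
		('q0', '1'): ('q0', '1', 'R'),
		('q0', 'B'): ('qf', 'B', 'N'),
	}
	M = TM(Q=['q0', 'qf'], Sigma=['0', '1'], Gamma=['0', '1', 'B'],
	       delta=delta, q_0='q0', q_f='qf')
	M.simulate('0110')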
| mit | 822,664,330,862,086,500 | 18.836066 | 197 | 0.465289 | false |
Forage/Gramps | gramps/gen/datehandler/_date_cs.py | 1 | 8857 | # -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
"""
Czech-specific classes for parsing and displaying dates.
"""
from __future__ import unicode_literals
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Czech parser
#
#-------------------------------------------------------------------------
class DateParserCZ(DateParser):
"""
Converts a text string into a Date object
"""
month_to_int = DateParser.month_to_int
month_to_int["leden"] = 1
month_to_int["ledna"] = 1
month_to_int["lednu"] = 1
month_to_int["led"] = 1
month_to_int["I"] = 1
month_to_int["i"] = 1
month_to_int["únor"] = 2
month_to_int["února"] = 2
month_to_int["únoru"] = 2
month_to_int["ún"] = 2
month_to_int["II"] = 2
month_to_int["ii"] = 2
month_to_int["březen"] = 3
month_to_int["března"] = 3
month_to_int["březnu"] = 3
month_to_int["bře"] = 3
month_to_int["III"] = 3
month_to_int["iii"] = 3
month_to_int["duben"] = 4
month_to_int["dubna"] = 4
month_to_int["dubnu"] = 4
month_to_int["dub"] = 4
month_to_int["IV"] = 4
month_to_int["iv"] = 4
month_to_int["květen"] = 5
month_to_int["května"] = 5
month_to_int["květnu"] = 5
month_to_int["V"] = 5
month_to_int["v"] = 5
month_to_int["červen"] = 6
month_to_int["června"] = 6
month_to_int["červnu"] = 6
month_to_int["čer"] = 6
month_to_int["vi"] = 6
month_to_int["červenec"] = 7
month_to_int["července"] = 7
month_to_int["červenci"] = 7
month_to_int["čvc"] = 7
month_to_int["VII"] = 7
month_to_int["vii"] = 7
month_to_int["srpen"] = 8
month_to_int["srpna"] = 8
month_to_int["srpnu"] = 8
month_to_int["srp"] = 8
month_to_int["VIII"] = 8
month_to_int["viii"] = 8
month_to_int["září"] = 9
month_to_int["zář"] = 9
month_to_int["IX"] = 9
month_to_int["ix"] = 9
month_to_int["říjen"] = 10
month_to_int["října"] = 10
month_to_int["říjnu"] = 10
month_to_int["říj"] = 10
month_to_int["X"] = 10
month_to_int["x"] = 10
month_to_int["listopad"] = 11
month_to_int["listopadu"] = 11
month_to_int["lis"] = 11
month_to_int["XI"] = 11
month_to_int["xi"] = 11
month_to_int["prosinec"] = 12
month_to_int["prosince"] = 12
month_to_int["prosinci"] = 12
month_to_int["pro"] = 12
month_to_int["XII"] = 12
month_to_int["xii"] = 12
modifier_to_int = {
'před' : Date.MOD_BEFORE,
'do' : Date.MOD_BEFORE,
'po' : Date.MOD_AFTER,
'asi' : Date.MOD_ABOUT,
'kolem' : Date.MOD_ABOUT,
'přibl.' : Date.MOD_ABOUT,
}
calendar_to_int = {
'gregoriánský' : Date.CAL_GREGORIAN,
'greg.' : Date.CAL_GREGORIAN,
'g' : Date.CAL_GREGORIAN,
'juliánský' : Date.CAL_JULIAN,
'jul.' : Date.CAL_JULIAN,
'j' : Date.CAL_JULIAN,
'hebrejský' : Date.CAL_HEBREW,
'hebr.' : Date.CAL_HEBREW,
'h' : Date.CAL_HEBREW,
'islámský' : Date.CAL_ISLAMIC,
'isl.' : Date.CAL_ISLAMIC,
'i' : Date.CAL_ISLAMIC,
'francouzský republikánský' : Date.CAL_FRENCH,
'fr.' : Date.CAL_FRENCH,
'perský' : Date.CAL_PERSIAN,
'per.' : Date.CAL_PERSIAN,
'p' : Date.CAL_PERSIAN,
'švédský' : Date.CAL_SWEDISH,
'sve.' : Date.CAL_SWEDISH,
's' : Date.CAL_SWEDISH,
}
quality_to_int = {
'odhadované' : Date.QUAL_ESTIMATED,
'odh.' : Date.QUAL_ESTIMATED,
'vypočtené' : Date.QUAL_CALCULATED,
'vyp.' : Date.QUAL_CALCULATED,
}
def init_strings(self):
DateParser.init_strings(self)
        self._span = re.compile(
            r"(od)\s+(?P<start>.+)\s+(do)\s+(?P<stop>.+)",
            re.IGNORECASE)
        self._range = re.compile(
            r"(mezi)\s+(?P<start>.+)\s+(a)\s+(?P<stop>.+)",
            re.IGNORECASE)
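        # Illustrative note (added, not in the original source): these patterns
        # mirror the English "from ... to ..." / "between ... and ..." forms, so
        # e.g. "od 1. ledna 1900 do 31. prosince 1910" parses as a span and
        # "mezi 1900 a 1910" parses as a range.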
#-------------------------------------------------------------------------
#
# Czech display
#
#-------------------------------------------------------------------------
class DateDisplayCZ(DateDisplay):
"""
Czech language date display class.
"""
long_months = ( "", "leden", "únor", "březen", "duben", "květen",
"červen", "červenec", "srpen", "září", "říjen",
"listopad", "prosinec" )
short_months = ( "", "led", "úno", "bře", "dub", "kvě", "čer",
"čvc", "srp", "zář", "říj", "lis", "pro" )
calendar = (
"", "juliánský", "hebrejský",
"francouzský republikánský", "perský", "islámský",
"švédský"
)
_mod_str = ("", "před ", "po ", "kolem ", "", "", "")
_qual_str = ("", "přibližně ", "vypočteno ")
bce = ["před naším letopočtem", "před Kristem",
"př. n. l.", "př. Kr."] + DateParser.bce
formats = (
"ISO (rrrr-mm-dd)",
"numerický",
"měsíc den, Rok",
"měs den, Rok",
"den. měsíc rok",
"den. měs rok"
)
def display(self, date):
"""
Return a text string representing the date.
"""
mod = date.get_modifier()
cal = date.get_calendar()
qual = date.get_quality()
start = date.get_start_date()
newyear = date.get_new_year()
qual_str = self._qual_str[qual]
if mod == Date.MOD_TEXTONLY:
return date.get_text()
elif start == Date.EMPTY:
return ""
elif mod == Date.MOD_NONE:
date_decl_string = self.display_cal[cal](start)
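            # Note (added comment, not in the original source): the substitutions
            # below crudely approximate the Czech genitive case of month names,
            # e.g. "leden" -> "ledna", "březen" -> "března",
            # "listopad" -> "listopadu", "červenec" -> "července".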
date_decl_string = date_decl_string.replace("den ", "dna ")
date_decl_string = date_decl_string.replace("or ", "ora ")
date_decl_string = date_decl_string.replace("en ", "na ")
date_decl_string = date_decl_string.replace("ad ", "adu ")
date_decl_string = date_decl_string.replace("ec ", "ce ")
return date_decl_string
elif mod == Date.MOD_SPAN:
dat1 = self.display_cal[cal](start)
dat2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'od', dat1,
'do', dat2, scal)
elif mod == Date.MOD_RANGE:
dat1 = self.display_cal[cal](start)
dat2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'mezi',
dat1, 'a', dat2, scal)
else:
text = self.display_cal[date.get_calendar()](start)
scal = self.format_extras(cal, newyear)
return "%s%s%s%s" % (qual_str, self._mod_str[mod],
text, scal)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(("cs", "CS", "cs_CZ", "Czech"), DateParserCZ, DateDisplayCZ)
| gpl-2.0 | 3,832,893,781,411,460,000 | 31.42963 | 81 | 0.473961 | false |
henry0312/LightGBM | examples/python-guide/dask/ranking.py | 1 | 1603 | import os
import dask.array as da
import numpy as np
from distributed import Client, LocalCluster
from sklearn.datasets import load_svmlight_file
import lightgbm as lgb
if __name__ == "__main__":
print("loading data")
X, y = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../lambdarank/rank.train'))
group = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../lambdarank/rank.train.query'))
print("initializing a Dask cluster")
cluster = LocalCluster(n_workers=2)
client = Client(cluster)
print("created a Dask LocalCluster")
print("distributing training data on the Dask cluster")
# split training data into two partitions
rows_in_part1 = int(np.sum(group[:100]))
rows_in_part2 = X.shape[0] - rows_in_part1
num_features = X.shape[1]
# make this array dense because we're splitting across
# a sparse boundary to partition the data
X = X.todense()
dX = da.from_array(
x=X,
chunks=[
(rows_in_part1, rows_in_part2),
(num_features,)
]
)
dy = da.from_array(
x=y,
chunks=[
(rows_in_part1, rows_in_part2),
]
)
dg = da.from_array(
x=group,
chunks=[
(100, group.size - 100)
]
)
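    # Note (added comment, not part of the original example): the chunk sizes
    # above are chosen so that each Dask partition boundary coincides with a
    # query-group boundary (the first chunk holds exactly the rows of the first
    # 100 groups), since a ranking group must not be split across partitions.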
print("beginning training")
dask_model = lgb.DaskLGBMRanker(n_estimators=10)
dask_model.fit(dX, dy, group=dg)
assert dask_model.fitted_
print("done training")
| mit | 5,765,375,206,655,105,000 | 24.854839 | 87 | 0.578291 | false |
hobson/pug | docs/source/conf.py | 1 | 11525 | # -*- coding: utf-8 -*-
#
# PUG documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 11 17:46:58 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pug'
copyright = u'2015, PDX Python User Group'
author = u'PDX Python User Group'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.21'
# The full version, including alpha/beta/rc tags.
release = '0.0.21'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pugdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PUG.tex', u'PUG Documentation',
u'PDX Python User Group', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pug', u'PUG Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PUG', u'PUG Documentation',
author, 'PUG', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit | 2,219,194,266,160,377,900 | 30.40327 | 80 | 0.706291 | false |
CrawlScript/Tensorflow-AutoEncoder | tutorial_iris.py | 1 | 3636 | # -*- coding: utf-8 -*-
from mpl_toolkits.mplot3d import Axes3D
from autoencoder import AutoEncoder, DataIterator
import codecs
from random import shuffle
from matplotlib import pyplot as plt
import numpy as np
class IrisDataSet(object):
def get_label_id(self, label):
if label in self.label_id_dict:
return self.label_id_dict[label]
self.label_id_dict[label] = self.next_label_id
self.next_label_id += 1
return self.next_label_id - 1
def __init__(self):
self.next_label_id = 0
self.label_id_dict = {}
with codecs.open("tutorial_datasets/iris/iris.data", "r", "utf-8") as f:
str_datas = [line.strip() for line in f]
str_datas = [line.split(",") for line in str_datas if len(line) > 0]
shuffle(str_datas)
self.datas = [[float(d) for d in row_data[0:-1]] for row_data in str_datas]
# normalize datas
self.datas = np.array(self.datas, dtype = np.float32)
self.datas = self.datas/self.datas.max(0)
self.datas = self.datas * 2 - 1
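        # Note (added comment, not in the original source): dividing by the
        # per-column maximum and applying *2 - 1 scales every feature to
        # roughly [-1, 1], matching the tanh activation used for the
        # autoencoder below.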
self.labels = [self.get_label_id(row_data[-1]) for row_data in str_datas]
iris_dataset = IrisDataSet()
# train data
datas = iris_dataset.datas
labels = iris_dataset.labels
# data wrapper
iterator = DataIterator(datas)
fine_tuning_iterator = DataIterator(datas, labels = labels)
# train autoencoder
# assume the input dimension is input_d
# the network is like input_d -> 4 -> 2 -> 4 -> input_d
autoencoder = AutoEncoder()
# train autoencoder without fine-tuning
print "\ntrain autoencoder without fine-tuning ==========\n"
autoencoder.fit([4, 2], iterator, stacked = True, learning_rate = 0.02, max_epoch = 5000, tied = True, activation = "tanh")
# encode data (without fine-tuning)
encoded_datas = autoencoder.encode(datas)
print "encoder (without fine-tuning) ================"
print encoded_datas
# train autoencoder with fine-tuning
print "\ntrain autoencoder with fine-tuning ==========\n"
autoencoder.fine_tune(fine_tuning_iterator, supervised = True, learning_rate = 0.02, max_epoch = 10000, tied = True)
#autoencoder.fine_tune(fine_tuning_iterator, supervised = False, learning_rate = 0.02, max_epoch = 6000)
# encode data (with fine-tuning)
tuned_encoded_datas = autoencoder.encode(datas)
print "encoder (with fine-tuning)================"
print tuned_encoded_datas
# predict data( based on fine tuning )
predicted_datas = autoencoder.predict(datas)
print "predicted ================"
print predicted_datas
predicted_labels = predicted_datas.argmax(1)
eval_array = (predicted_labels == labels)
correct_count = len(np.where(eval_array == True)[0])
error_count = len(np.where(eval_array == False)[0])
correct_rate = float(correct_count)/(correct_count + error_count)
error_rate = float(error_count)/(correct_count + error_count)
print "correct: {}({})\terror: {}({})".format(correct_count, "%.2f" % correct_rate, error_count, "%.2f" % error_rate)
autoencoder.close()
#visualize encoded datas
colors = ["red", "green", "blue"]
label_colors = [colors[label_id] for label_id in labels]
fig_3d =plt.figure("origin iris data")
plot_3d = fig_3d.add_subplot(111, projection='3d')
plot_3d.scatter(datas[:,0], datas[:,1], datas[:, 2], color = label_colors)
fig_2d = plt.figure("encoded iris data (without fine-tuning)")
plot_2d = fig_2d.add_subplot(111)
plot_2d.scatter(encoded_datas[:,0], encoded_datas[:,1], color = label_colors)
fig_tuned_2d = plt.figure("encoded iris data (with fine-tuning)")
plot_tuned_2d = fig_tuned_2d.add_subplot(111)
plot_tuned_2d.scatter(tuned_encoded_datas[:,0], tuned_encoded_datas[:,1], color = label_colors)
plt.show()
| gpl-3.0 | -1,081,317,834,420,715,400 | 37.273684 | 123 | 0.681793 | false |
platformio/platformio | platformio/clients/account.py | 1 | 9820 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from platformio import __accounts_api__, app
from platformio.clients.rest import RESTClient
from platformio.exception import PlatformioException
class AccountError(PlatformioException):
MESSAGE = "{0}"
class AccountNotAuthorized(AccountError):
MESSAGE = "You are not authorized! Please log in to PIO Account."
class AccountAlreadyAuthorized(AccountError):
MESSAGE = "You are already authorized with {0} account."
class AccountClient(RESTClient): # pylint:disable=too-many-public-methods
SUMMARY_CACHE_TTL = 60 * 60 * 24 * 7
def __init__(self):
super(AccountClient, self).__init__(base_url=__accounts_api__)
@staticmethod
def get_refresh_token():
try:
return app.get_state_item("account").get("auth").get("refresh_token")
except: # pylint:disable=bare-except
raise AccountNotAuthorized()
@staticmethod
def delete_local_session():
app.delete_state_item("account")
@staticmethod
def delete_local_state(key):
account = app.get_state_item("account")
if not account or key not in account:
return
del account[key]
app.set_state_item("account", account)
def send_auth_request(self, *args, **kwargs):
headers = kwargs.get("headers", {})
if "Authorization" not in headers:
token = self.fetch_authentication_token()
headers["Authorization"] = "Bearer %s" % token
kwargs["headers"] = headers
return self.send_request(*args, **kwargs)
def login(self, username, password):
try:
self.fetch_authentication_token()
except: # pylint:disable=bare-except
pass
else:
raise AccountAlreadyAuthorized(
app.get_state_item("account", {}).get("email", "")
)
result = self.send_request(
"post", "/v1/login", data={"username": username, "password": password},
)
app.set_state_item("account", result)
return result
def login_with_code(self, client_id, code, redirect_uri):
try:
self.fetch_authentication_token()
except: # pylint:disable=bare-except
pass
else:
raise AccountAlreadyAuthorized(
app.get_state_item("account", {}).get("email", "")
)
result = self.send_request(
"post",
"/v1/login/code",
data={"client_id": client_id, "code": code, "redirect_uri": redirect_uri},
)
app.set_state_item("account", result)
return result
def logout(self):
refresh_token = self.get_refresh_token()
self.delete_local_session()
try:
self.send_request(
"post", "/v1/logout", data={"refresh_token": refresh_token},
)
except AccountError:
pass
return True
def change_password(self, old_password, new_password):
return self.send_auth_request(
"post",
"/v1/password",
data={"old_password": old_password, "new_password": new_password},
)
def registration(
self, username, email, password, firstname, lastname
): # pylint:disable=too-many-arguments
try:
self.fetch_authentication_token()
except: # pylint:disable=bare-except
pass
else:
raise AccountAlreadyAuthorized(
app.get_state_item("account", {}).get("email", "")
)
return self.send_request(
"post",
"/v1/registration",
data={
"username": username,
"email": email,
"password": password,
"firstname": firstname,
"lastname": lastname,
},
)
def auth_token(self, password, regenerate):
return self.send_auth_request(
"post",
"/v1/token",
data={"password": password, "regenerate": 1 if regenerate else 0},
).get("auth_token")
def forgot_password(self, username):
return self.send_request("post", "/v1/forgot", data={"username": username},)
def get_profile(self):
return self.send_auth_request("get", "/v1/profile",)
def update_profile(self, profile, current_password):
profile["current_password"] = current_password
self.delete_local_state("summary")
response = self.send_auth_request("put", "/v1/profile", data=profile,)
return response
def get_account_info(self, offline=False):
account = app.get_state_item("account") or {}
if (
account.get("summary")
and account["summary"].get("expire_at", 0) > time.time()
):
return account["summary"]
if offline and account.get("email"):
return {
"profile": {
"email": account.get("email"),
"username": account.get("username"),
}
}
result = self.send_auth_request("get", "/v1/summary",)
account["summary"] = dict(
profile=result.get("profile"),
packages=result.get("packages"),
subscriptions=result.get("subscriptions"),
user_id=result.get("user_id"),
expire_at=int(time.time()) + self.SUMMARY_CACHE_TTL,
)
app.set_state_item("account", account)
return result
def destroy_account(self):
return self.send_auth_request("delete", "/v1/account")
def create_org(self, orgname, email, displayname):
return self.send_auth_request(
"post",
"/v1/orgs",
data={"orgname": orgname, "email": email, "displayname": displayname},
)
def get_org(self, orgname):
return self.send_auth_request("get", "/v1/orgs/%s" % orgname)
def list_orgs(self):
return self.send_auth_request("get", "/v1/orgs",)
def update_org(self, orgname, data):
return self.send_auth_request(
"put", "/v1/orgs/%s" % orgname, data={k: v for k, v in data.items() if v}
)
def destroy_org(self, orgname):
return self.send_auth_request("delete", "/v1/orgs/%s" % orgname,)
def add_org_owner(self, orgname, username):
return self.send_auth_request(
"post", "/v1/orgs/%s/owners" % orgname, data={"username": username},
)
def list_org_owners(self, orgname):
return self.send_auth_request("get", "/v1/orgs/%s/owners" % orgname,)
def remove_org_owner(self, orgname, username):
return self.send_auth_request(
"delete", "/v1/orgs/%s/owners" % orgname, data={"username": username},
)
def create_team(self, orgname, teamname, description):
return self.send_auth_request(
"post",
"/v1/orgs/%s/teams" % orgname,
data={"name": teamname, "description": description},
)
def destroy_team(self, orgname, teamname):
return self.send_auth_request(
"delete", "/v1/orgs/%s/teams/%s" % (orgname, teamname),
)
def get_team(self, orgname, teamname):
return self.send_auth_request(
"get", "/v1/orgs/%s/teams/%s" % (orgname, teamname),
)
def list_teams(self, orgname):
return self.send_auth_request("get", "/v1/orgs/%s/teams" % orgname,)
def update_team(self, orgname, teamname, data):
return self.send_auth_request(
"put",
"/v1/orgs/%s/teams/%s" % (orgname, teamname),
data={k: v for k, v in data.items() if v},
)
def add_team_member(self, orgname, teamname, username):
return self.send_auth_request(
"post",
"/v1/orgs/%s/teams/%s/members" % (orgname, teamname),
data={"username": username},
)
def remove_team_member(self, orgname, teamname, username):
return self.send_auth_request(
"delete",
"/v1/orgs/%s/teams/%s/members" % (orgname, teamname),
data={"username": username},
)
def fetch_authentication_token(self):
if os.environ.get("PLATFORMIO_AUTH_TOKEN"):
return os.environ.get("PLATFORMIO_AUTH_TOKEN")
auth = app.get_state_item("account", {}).get("auth", {})
if auth.get("access_token") and auth.get("access_token_expire"):
if auth.get("access_token_expire") > time.time():
return auth.get("access_token")
if auth.get("refresh_token"):
try:
result = self.send_request(
"post",
"/v1/login",
headers={
"Authorization": "Bearer %s" % auth.get("refresh_token")
},
)
app.set_state_item("account", result)
return result.get("auth").get("access_token")
except AccountError:
self.delete_local_session()
raise AccountNotAuthorized()
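# Illustrative usage sketch (added, not part of the original module); the
# methods exist above, but the flow itself is only an assumption:
#
#     client = AccountClient()
#     client.login("username", "password")  # caches the session via app.set_state_item
#     info = client.get_account_info()      # refreshes the access token when needed
#     client.logout()                       # revokes the refresh token and clears state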
| apache-2.0 | -9,000,951,945,673,739,000 | 32.862069 | 86 | 0.563238 | false |
KDNT/p2pool-worldcoin-old | p2pool/data.py | 1 | 55789 | from __future__ import division
import hashlib
import os
import random
import sys
import time
from twisted.python import log
import p2pool
from p2pool.bitcoin import data as bitcoin_data, script, sha256
from p2pool.util import math, forest, pack
# hashlink
hash_link_type = pack.ComposedType([
('state', pack.FixedStrType(32)),
('extra_data', pack.FixedStrType(0)), # bit of a hack, but since the donation script is at the end, const_ending is long enough to always make this empty
('length', pack.VarIntType()),
])
def prefix_to_hash_link(prefix, const_ending=''):
assert prefix.endswith(const_ending), (prefix, const_ending)
x = sha256.sha256(prefix)
return dict(state=x.state, extra_data=x.buf[:max(0, len(x.buf)-len(const_ending))], length=x.length//8)
def check_hash_link(hash_link, data, const_ending=''):
extra_length = hash_link['length'] % (512//8)
assert len(hash_link['extra_data']) == max(0, extra_length - len(const_ending))
extra = (hash_link['extra_data'] + const_ending)[len(hash_link['extra_data']) + len(const_ending) - extra_length:]
assert len(extra) == extra_length
return pack.IntType(256).unpack(hashlib.sha256(sha256.sha256(data, (hash_link['state'], extra, 8*hash_link['length'])).digest()).digest())
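# Illustrative sketch (added, not in the original source) of how the two helpers
# above fit together: prefix_to_hash_link() captures a SHA-256 midstate over a
# known prefix, and check_hash_link() later finishes the double SHA-256 of
# prefix + data without needing the prefix again.  Assuming `prefix` ends with
# `const_ending`:
#
#     link = prefix_to_hash_link(prefix, const_ending)
#     assert check_hash_link(link, data, const_ending) == \
#         bitcoin_data.hash256(prefix + data)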
# shares
share_type = pack.ComposedType([
('type', pack.VarIntType()),
('contents', pack.VarStrType()),
])
def load_share(share, net, peer_addr):
assert peer_addr is None or isinstance(peer_addr, tuple)
if share['type'] < Share.VERSION:
from p2pool import p2p
raise p2p.PeerMisbehavingError('sent an obsolete share')
elif share['type'] == Share.VERSION:
return Share(net, peer_addr, Share.share_type.unpack(share['contents']))
elif share['type'] == NewShare.VERSION:
return NewShare(net, peer_addr, NewShare.share_type.unpack(share['contents']))
else:
raise ValueError('unknown share type: %r' % (share['type'],))
DONATION_SCRIPT = '4104ffd03de44a6e11b9917f3a29f9443283d9871c9d743ef30d5eddcd37094b64d1b3d8090496b53256786bf5c82932ec23c3b74d9f05a6f95a8b5529352656664bac'.decode('hex')
class NewShare(object):
VERSION = 13
VOTING_VERSION = 13
SUCCESSOR = None
small_block_header_type = pack.ComposedType([
('version', pack.VarIntType()),
('previous_block', pack.PossiblyNoneType(0, pack.IntType(256))),
('timestamp', pack.IntType(32)),
('bits', bitcoin_data.FloatingIntegerType()),
('nonce', pack.IntType(32)),
])
share_info_type = pack.ComposedType([
('share_data', pack.ComposedType([
('previous_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('coinbase', pack.VarStrType()),
('nonce', pack.IntType(32)),
('pubkey_hash', pack.IntType(160)),
('subsidy', pack.IntType(64)),
('donation', pack.IntType(16)),
('stale_info', pack.EnumType(pack.IntType(8), dict((k, {0: None, 253: 'orphan', 254: 'doa'}.get(k, 'unk%i' % (k,))) for k in xrange(256)))),
('desired_version', pack.VarIntType()),
])),
('new_transaction_hashes', pack.ListType(pack.IntType(256))),
('transaction_hash_refs', pack.ListType(pack.VarIntType(), 2)), # pairs of share_count, tx_count
('far_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('max_bits', bitcoin_data.FloatingIntegerType()),
('bits', bitcoin_data.FloatingIntegerType()),
('timestamp', pack.IntType(32)),
('absheight', pack.IntType(32)),
('abswork', pack.IntType(128)),
])
share_type = pack.ComposedType([
('min_header', small_block_header_type),
('share_info', share_info_type),
('ref_merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)),
])),
('last_txout_nonce', pack.IntType(64)),
('hash_link', hash_link_type),
('merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)), # it will always be 0
])),
])
ref_type = pack.ComposedType([
('identifier', pack.FixedStrType(64//8)),
('share_info', share_info_type),
])
gentx_before_refhash = pack.VarStrType().pack(DONATION_SCRIPT) + pack.IntType(64).pack(0) + pack.VarStrType().pack('\x6a\x28' + pack.IntType(256).pack(0) + pack.IntType(64).pack(0))[:3]
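    # Note (added comment, not in the original source): gentx_before_refhash is the
    # constant serialized tail of the generation transaction that directly precedes
    # the ref hash (the donation output's script, the zero value of the final output,
    # and the first bytes of its OP_RETURN script).  It is passed to
    # prefix_to_hash_link() as const_ending so that the hash_link stores only a
    # SHA-256 midstate with empty extra_data.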
@classmethod
def generate_transaction(cls, tracker, share_data, block_target, desired_timestamp, desired_target, ref_merkle_link, desired_other_transaction_hashes_and_fees, net, known_txs=None, last_txout_nonce=0, base_subsidy=None):
previous_share = tracker.items[share_data['previous_share_hash']] if share_data['previous_share_hash'] is not None else None
height, last = tracker.get_height_and_last(share_data['previous_share_hash'])
assert height >= net.REAL_CHAIN_LENGTH or last is None
if height < net.TARGET_LOOKBEHIND:
pre_target3 = net.MAX_TARGET
else:
attempts_per_second = get_pool_attempts_per_second(tracker, share_data['previous_share_hash'], net.TARGET_LOOKBEHIND, min_work=True, integer=True)
pre_target = 2**256//(net.NEW_SHARE_PERIOD*attempts_per_second) - 1 if attempts_per_second else 2**256-1
pre_target2 = math.clip(pre_target, (previous_share.max_target*9//10, previous_share.max_target*11//10))
pre_target3 = math.clip(pre_target2, (net.MIN_TARGET, net.MAX_TARGET))
max_bits = bitcoin_data.FloatingInteger.from_target_upper_bound(pre_target3)
bits = bitcoin_data.FloatingInteger.from_target_upper_bound(math.clip(desired_target, (pre_target3//30, pre_target3)))
new_transaction_hashes = []
new_transaction_size = 0
transaction_hash_refs = []
other_transaction_hashes = []
past_shares = list(tracker.get_chain(share_data['previous_share_hash'], min(height, 100)))
tx_hash_to_this = {}
for i, share in enumerate(past_shares):
for j, tx_hash in enumerate(share.new_transaction_hashes):
if tx_hash not in tx_hash_to_this:
tx_hash_to_this[tx_hash] = [1+i, j] # share_count, tx_count
for tx_hash, fee in desired_other_transaction_hashes_and_fees:
if tx_hash in tx_hash_to_this:
this = tx_hash_to_this[tx_hash]
else:
if known_txs is not None:
this_size = bitcoin_data.tx_type.packed_size(known_txs[tx_hash])
if new_transaction_size + this_size > 50000: # only allow 50 kB of new txns/share
break
new_transaction_size += this_size
new_transaction_hashes.append(tx_hash)
this = [0, len(new_transaction_hashes)-1]
transaction_hash_refs.extend(this)
other_transaction_hashes.append(tx_hash)
included_transactions = set(other_transaction_hashes)
removed_fees = [fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash not in included_transactions]
definite_fees = sum(0 if fee is None else fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash in included_transactions)
if None not in removed_fees:
share_data = dict(share_data, subsidy=share_data['subsidy'] - sum(removed_fees))
else:
assert base_subsidy is not None
share_data = dict(share_data, subsidy=base_subsidy + definite_fees)
weights, total_weight, donation_weight = tracker.get_cumulative_weights(previous_share.share_data['previous_share_hash'] if previous_share is not None else None,
min(height, net.REAL_CHAIN_LENGTH-1),
65535*net.NEW_SPREAD*bitcoin_data.target_to_average_attempts(block_target),
)
assert total_weight == sum(weights.itervalues()) + donation_weight, (total_weight, sum(weights.itervalues()) + donation_weight)
amounts = dict((script, share_data['subsidy']*(199*weight)//(200*total_weight)) for script, weight in weights.iteritems()) # 99.5% goes according to weights prior to this share
this_script = bitcoin_data.pubkey_hash_to_script2(share_data['pubkey_hash'])
amounts[this_script] = amounts.get(this_script, 0) + share_data['subsidy']//200 # 0.5% goes to block finder
amounts[DONATION_SCRIPT] = amounts.get(DONATION_SCRIPT, 0) + share_data['subsidy'] - sum(amounts.itervalues()) # all that's left over is the donation weight and some extra satoshis due to rounding
if sum(amounts.itervalues()) != share_data['subsidy'] or any(x < 0 for x in amounts.itervalues()):
raise ValueError()
dests = sorted(amounts.iterkeys(), key=lambda script: (script == DONATION_SCRIPT, amounts[script], script))[-4000:] # block length limit, unlikely to ever be hit
share_info = dict(
share_data=share_data,
far_share_hash=None if last is None and height < 99 else tracker.get_nth_parent_hash(share_data['previous_share_hash'], 99),
max_bits=max_bits,
bits=bits,
timestamp=math.clip(desired_timestamp, (
(previous_share.timestamp + net.NEW_SHARE_PERIOD) - (net.NEW_SHARE_PERIOD - 1), # = previous_share.timestamp + 1
(previous_share.timestamp + net.NEW_SHARE_PERIOD) + (net.NEW_SHARE_PERIOD - 1),
)) if previous_share is not None else desired_timestamp,
new_transaction_hashes=new_transaction_hashes,
transaction_hash_refs=transaction_hash_refs,
absheight=((previous_share.absheight if previous_share is not None else 0) + 1) % 2**32,
abswork=((previous_share.abswork if previous_share is not None else 0) + bitcoin_data.target_to_average_attempts(bits.target)) % 2**128,
)
gentx = dict(
version=1,
tx_ins=[dict(
previous_output=None,
sequence=None,
script=share_data['coinbase'],
)],
tx_outs=[dict(value=amounts[script], script=script) for script in dests if amounts[script] or script == DONATION_SCRIPT] + [dict(
value=0,
script='\x6a\x28' + cls.get_ref_hash(net, share_info, ref_merkle_link) + pack.IntType(64).pack(last_txout_nonce),
)],
lock_time=0,
)
def get_share(header, last_txout_nonce=last_txout_nonce):
min_header = dict(header); del min_header['merkle_root']
share = cls(net, None, dict(
min_header=min_header,
share_info=share_info,
ref_merkle_link=dict(branch=[], index=0),
last_txout_nonce=(last_txout_nonce%2**32*2**32)|(last_txout_nonce>>32), # XXX
hash_link=prefix_to_hash_link(bitcoin_data.tx_type.pack(gentx)[:-32-8-4], cls.gentx_before_refhash),
merkle_link=bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0),
))
assert share.header == header # checks merkle_root
return share
return share_info, gentx, other_transaction_hashes, get_share
@classmethod
def get_ref_hash(cls, net, share_info, ref_merkle_link):
return pack.IntType(256).pack(bitcoin_data.check_merkle_link(bitcoin_data.hash256(cls.ref_type.pack(dict(
identifier=net.IDENTIFIER,
share_info=share_info,
))), ref_merkle_link))
__slots__ = 'net peer_addr contents min_header share_info hash_link merkle_link hash share_data max_target target timestamp previous_hash new_script desired_version gentx_hash header pow_hash header_hash new_transaction_hashes time_seen absheight abswork'.split(' ')
def __init__(self, net, peer_addr, contents):
self.net = net
self.peer_addr = peer_addr
self.contents = contents
self.min_header = contents['min_header']
self.share_info = contents['share_info']
self.hash_link = contents['hash_link']
self.merkle_link = contents['merkle_link']
if not (2 <= len(self.share_info['share_data']['coinbase']) <= 100):
raise ValueError('''bad coinbase size! %i bytes''' % (len(self.share_info['share_data']['coinbase']),))
if len(self.merkle_link['branch']) > 16:
raise ValueError('merkle branch too long!')
assert not self.hash_link['extra_data'], repr(self.hash_link['extra_data'])
self.share_data = self.share_info['share_data']
self.max_target = self.share_info['max_bits'].target
self.target = self.share_info['bits'].target
self.timestamp = self.share_info['timestamp']
self.previous_hash = self.share_data['previous_share_hash']
self.new_script = bitcoin_data.pubkey_hash_to_script2(self.share_data['pubkey_hash'])
self.desired_version = self.share_data['desired_version']
self.absheight = self.share_info['absheight']
self.abswork = self.share_info['abswork']
n = set()
for share_count, tx_count in self.iter_transaction_hash_refs():
assert share_count < 110
if share_count == 0:
n.add(tx_count)
assert n == set(range(len(self.share_info['new_transaction_hashes'])))
self.gentx_hash = check_hash_link(
self.hash_link,
self.get_ref_hash(net, self.share_info, contents['ref_merkle_link']) + pack.IntType(64).pack(self.contents['last_txout_nonce']) + pack.IntType(32).pack(0),
self.gentx_before_refhash,
)
merkle_root = bitcoin_data.check_merkle_link(self.gentx_hash, self.merkle_link)
self.header = dict(self.min_header, merkle_root=merkle_root)
self.pow_hash = net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(self.header))
self.hash = self.header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(self.header))
if self.target > net.MAX_TARGET:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share target invalid')
if self.pow_hash > self.target:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share PoW invalid')
self.new_transaction_hashes = self.share_info['new_transaction_hashes']
# XXX eww
self.time_seen = time.time()
def __repr__(self):
return 'Share' + repr((self.net, self.peer_addr, self.contents))
def as_share(self):
return dict(type=self.VERSION, contents=self.share_type.pack(self.contents))
def iter_transaction_hash_refs(self):
return zip(self.share_info['transaction_hash_refs'][::2], self.share_info['transaction_hash_refs'][1::2])
def check(self, tracker):
from p2pool import p2p
if self.share_data['previous_share_hash'] is not None:
previous_share = tracker.items[self.share_data['previous_share_hash']]
if type(self) is type(previous_share):
pass
elif type(self) is type(previous_share).SUCCESSOR:
if tracker.get_height(previous_share.hash) < self.net.CHAIN_LENGTH:
from p2pool import p2p
raise p2p.PeerMisbehavingError('switch without enough history')
# switch only valid if 85% of hashes in [self.net.CHAIN_LENGTH*9//10, self.net.CHAIN_LENGTH] for new version
counts = get_desired_version_counts(tracker,
tracker.get_nth_parent_hash(previous_share.hash, self.net.CHAIN_LENGTH*9//10), self.net.CHAIN_LENGTH//10)
if counts.get(self.VERSION, 0) < sum(counts.itervalues())*85//100:
raise p2p.PeerMisbehavingError('switch without enough hash power upgraded')
else:
raise p2p.PeerMisbehavingError('''%s can't follow %s''' % (type(self).__name__, type(previous_share).__name__))
other_tx_hashes = [tracker.items[tracker.get_nth_parent_hash(self.hash, share_count)].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
share_info, gentx, other_tx_hashes2, get_share = self.generate_transaction(tracker, self.share_info['share_data'], self.header['bits'].target, self.share_info['timestamp'], self.share_info['bits'].target, self.contents['ref_merkle_link'], [(h, None) for h in other_tx_hashes], self.net, last_txout_nonce=self.contents['last_txout_nonce'])
assert other_tx_hashes2 == other_tx_hashes
if share_info != self.share_info:
raise ValueError('share_info invalid')
if bitcoin_data.hash256(bitcoin_data.tx_type.pack(gentx)) != self.gentx_hash:
raise ValueError('''gentx doesn't match hash_link''')
if bitcoin_data.calculate_merkle_link([None] + other_tx_hashes, 0) != self.merkle_link:
raise ValueError('merkle_link and other_tx_hashes do not match')
return gentx # only used by as_block
def get_other_tx_hashes(self, tracker):
parents_needed = max(share_count for share_count, tx_count in self.iter_transaction_hash_refs()) if self.share_info['transaction_hash_refs'] else 0
parents = tracker.get_height(self.hash) - 1
if parents < parents_needed:
return None
last_shares = list(tracker.get_chain(self.hash, parents_needed + 1))
return [last_shares[share_count].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
def _get_other_txs(self, tracker, known_txs):
other_tx_hashes = self.get_other_tx_hashes(tracker)
if other_tx_hashes is None:
return None # not all parents present
if not all(tx_hash in known_txs for tx_hash in other_tx_hashes):
return None # not all txs present
return [known_txs[tx_hash] for tx_hash in other_tx_hashes]
def should_punish_reason(self, previous_block, bits, tracker, known_txs):
if (self.header['previous_block'], self.header['bits']) != (previous_block, bits) and self.header_hash != previous_block and self.peer_addr is not None:
            return True, 'Block-stale detected! previous_block %064x != %064x or bits %08x != %08x' % (self.header['previous_block'], previous_block, self.header['bits'].bits, bits.bits)
if self.pow_hash <= self.header['bits'].target:
return -1, 'block solution'
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
pass
else:
all_txs_size = sum(bitcoin_data.tx_type.packed_size(tx) for tx in other_txs)
if all_txs_size > 1000000:
return True, 'txs over block size limit'
new_txs_size = sum(bitcoin_data.tx_type.packed_size(known_txs[tx_hash]) for tx_hash in self.share_info['new_transaction_hashes'])
if new_txs_size > 50000:
return True, 'new txs over limit'
return False, None
def as_block(self, tracker, known_txs):
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
return None # not all txs present
return dict(header=self.header, txs=[self.check(tracker)] + other_txs)
class Share(object):
VERSION = 9
VOTING_VERSION = 11
SUCCESSOR = NewShare
absheight = abswork = 0
small_block_header_type = pack.ComposedType([
('version', pack.VarIntType()),
('previous_block', pack.PossiblyNoneType(0, pack.IntType(256))),
('timestamp', pack.IntType(32)),
('bits', bitcoin_data.FloatingIntegerType()),
('nonce', pack.IntType(32)),
])
share_info_type = pack.ComposedType([
('share_data', pack.ComposedType([
('previous_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('coinbase', pack.VarStrType()),
('nonce', pack.IntType(32)),
('pubkey_hash', pack.IntType(160)),
('subsidy', pack.IntType(64)),
('donation', pack.IntType(16)),
('stale_info', pack.EnumType(pack.IntType(8), dict((k, {0: None, 253: 'orphan', 254: 'doa'}.get(k, 'unk%i' % (k,))) for k in xrange(256)))),
('desired_version', pack.VarIntType()),
])),
('new_transaction_hashes', pack.ListType(pack.IntType(256))),
('transaction_hash_refs', pack.ListType(pack.VarIntType(), 2)), # pairs of share_count, tx_count
('far_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
('max_bits', bitcoin_data.FloatingIntegerType()),
('bits', bitcoin_data.FloatingIntegerType()),
('timestamp', pack.IntType(32)),
])
share_type = pack.ComposedType([
('min_header', small_block_header_type),
('share_info', share_info_type),
('ref_merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)),
])),
('last_txout_nonce', pack.IntType(32)),
('hash_link', hash_link_type),
('merkle_link', pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(0)), # it will always be 0
])),
])
ref_type = pack.ComposedType([
('identifier', pack.FixedStrType(64//8)),
('share_info', share_info_type),
])
gentx_before_refhash = pack.VarStrType().pack(DONATION_SCRIPT) + pack.IntType(64).pack(0) + pack.VarStrType().pack('\x24' + pack.IntType(256).pack(0) + pack.IntType(32).pack(0))[:2]
@classmethod
def generate_transaction(cls, tracker, share_data, block_target, desired_timestamp, desired_target, ref_merkle_link, desired_other_transaction_hashes_and_fees, net, known_txs=None, last_txout_nonce=0, base_subsidy=None):
previous_share = tracker.items[share_data['previous_share_hash']] if share_data['previous_share_hash'] is not None else None
height, last = tracker.get_height_and_last(share_data['previous_share_hash'])
assert height >= net.REAL_CHAIN_LENGTH or last is None
if height < net.TARGET_LOOKBEHIND:
pre_target3 = net.MAX_TARGET
else:
attempts_per_second = get_pool_attempts_per_second(tracker, share_data['previous_share_hash'], net.TARGET_LOOKBEHIND, min_work=True, integer=True)
pre_target = 2**256//(net.SHARE_PERIOD*attempts_per_second) - 1 if attempts_per_second else 2**256-1
pre_target2 = math.clip(pre_target, (previous_share.max_target*9//10, previous_share.max_target*11//10))
pre_target3 = math.clip(pre_target2, (net.MIN_TARGET, net.MAX_TARGET))
max_bits = bitcoin_data.FloatingInteger.from_target_upper_bound(pre_target3)
bits = bitcoin_data.FloatingInteger.from_target_upper_bound(math.clip(desired_target, (pre_target3//10, pre_target3)))
new_transaction_hashes = []
new_transaction_size = 0
transaction_hash_refs = []
other_transaction_hashes = []
past_shares = list(tracker.get_chain(share_data['previous_share_hash'], min(height, 100)))
tx_hash_to_this = {}
for i, share in enumerate(past_shares):
for j, tx_hash in enumerate(share.new_transaction_hashes):
if tx_hash not in tx_hash_to_this:
tx_hash_to_this[tx_hash] = [1+i, j] # share_count, tx_count
for tx_hash, fee in desired_other_transaction_hashes_and_fees:
if tx_hash in tx_hash_to_this:
this = tx_hash_to_this[tx_hash]
else:
if known_txs is not None:
this_size = bitcoin_data.tx_type.packed_size(known_txs[tx_hash])
if new_transaction_size + this_size > 50000: # only allow 50 kB of new txns/share
break
new_transaction_size += this_size
new_transaction_hashes.append(tx_hash)
this = [0, len(new_transaction_hashes)-1]
transaction_hash_refs.extend(this)
other_transaction_hashes.append(tx_hash)
included_transactions = set(other_transaction_hashes)
removed_fees = [fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash not in included_transactions]
definite_fees = sum(0 if fee is None else fee for tx_hash, fee in desired_other_transaction_hashes_and_fees if tx_hash in included_transactions)
if None not in removed_fees:
share_data = dict(share_data, subsidy=share_data['subsidy'] - sum(removed_fees))
else:
assert base_subsidy is not None
share_data = dict(share_data, subsidy=base_subsidy + definite_fees)
weights, total_weight, donation_weight = tracker.get_cumulative_weights(share_data['previous_share_hash'],
min(height, net.REAL_CHAIN_LENGTH),
65535*net.SPREAD*bitcoin_data.target_to_average_attempts(block_target),
)
assert total_weight == sum(weights.itervalues()) + donation_weight, (total_weight, sum(weights.itervalues()) + donation_weight)
amounts = dict((script, share_data['subsidy']*(199*weight)//(200*total_weight)) for script, weight in weights.iteritems()) # 99.5% goes according to weights prior to this share
this_script = bitcoin_data.pubkey_hash_to_script2(share_data['pubkey_hash'])
amounts[this_script] = amounts.get(this_script, 0) + share_data['subsidy']//200 # 0.5% goes to block finder
amounts[DONATION_SCRIPT] = amounts.get(DONATION_SCRIPT, 0) + share_data['subsidy'] - sum(amounts.itervalues()) # all that's left over is the donation weight and some extra satoshis due to rounding
if sum(amounts.itervalues()) != share_data['subsidy'] or any(x < 0 for x in amounts.itervalues()):
raise ValueError()
dests = sorted(amounts.iterkeys(), key=lambda script: (script == DONATION_SCRIPT, amounts[script], script))[-4000:] # block length limit, unlikely to ever be hit
share_info = dict(
share_data=share_data,
far_share_hash=None if last is None and height < 99 else tracker.get_nth_parent_hash(share_data['previous_share_hash'], 99),
max_bits=max_bits,
bits=bits,
timestamp=math.clip(desired_timestamp, (
(previous_share.timestamp + net.SHARE_PERIOD) - (net.SHARE_PERIOD - 1), # = previous_share.timestamp + 1
(previous_share.timestamp + net.SHARE_PERIOD) + (net.SHARE_PERIOD - 1),
)) if previous_share is not None else desired_timestamp,
new_transaction_hashes=new_transaction_hashes,
transaction_hash_refs=transaction_hash_refs,
)
gentx = dict(
version=1,
tx_ins=[dict(
previous_output=None,
sequence=None,
script=share_data['coinbase'],
)],
tx_outs=[dict(value=amounts[script], script=script) for script in dests if amounts[script] or script == DONATION_SCRIPT] + [dict(
value=0,
script='\x24' + cls.get_ref_hash(net, share_info, ref_merkle_link) + pack.IntType(32).pack(last_txout_nonce),
)],
lock_time=0,
)
def get_share(header, last_txout_nonce=last_txout_nonce):
min_header = dict(header); del min_header['merkle_root']
share = cls(net, None, dict(
min_header=min_header,
share_info=share_info,
ref_merkle_link=dict(branch=[], index=0),
last_txout_nonce=last_txout_nonce,
hash_link=prefix_to_hash_link(bitcoin_data.tx_type.pack(gentx)[:-32-4-4], cls.gentx_before_refhash),
merkle_link=bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0),
))
assert share.header == header # checks merkle_root
return share
return share_info, gentx, other_transaction_hashes, get_share
@classmethod
def get_ref_hash(cls, net, share_info, ref_merkle_link):
return pack.IntType(256).pack(bitcoin_data.check_merkle_link(bitcoin_data.hash256(cls.ref_type.pack(dict(
identifier=net.IDENTIFIER,
share_info=share_info,
))), ref_merkle_link))
__slots__ = 'net peer_addr contents min_header share_info hash_link merkle_link hash share_data max_target target timestamp previous_hash new_script desired_version gentx_hash header pow_hash header_hash new_transaction_hashes time_seen'.split(' ')
def __init__(self, net, peer_addr, contents):
self.net = net
self.peer_addr = peer_addr
self.contents = contents
self.min_header = contents['min_header']
self.share_info = contents['share_info']
self.hash_link = contents['hash_link']
self.merkle_link = contents['merkle_link']
if not (2 <= len(self.share_info['share_data']['coinbase']) <= 100):
raise ValueError('''bad coinbase size! %i bytes''' % (len(self.share_info['share_data']['coinbase']),))
if len(self.merkle_link['branch']) > 16:
raise ValueError('merkle branch too long!')
assert not self.hash_link['extra_data'], repr(self.hash_link['extra_data'])
self.share_data = self.share_info['share_data']
self.max_target = self.share_info['max_bits'].target
self.target = self.share_info['bits'].target
self.timestamp = self.share_info['timestamp']
self.previous_hash = self.share_data['previous_share_hash']
self.new_script = bitcoin_data.pubkey_hash_to_script2(self.share_data['pubkey_hash'])
self.desired_version = self.share_data['desired_version']
n = set()
for share_count, tx_count in self.iter_transaction_hash_refs():
assert share_count < 110
if share_count == 0:
n.add(tx_count)
assert n == set(range(len(self.share_info['new_transaction_hashes'])))
self.gentx_hash = check_hash_link(
self.hash_link,
self.get_ref_hash(net, self.share_info, contents['ref_merkle_link']) + pack.IntType(32).pack(self.contents['last_txout_nonce']) + pack.IntType(32).pack(0),
self.gentx_before_refhash,
)
merkle_root = bitcoin_data.check_merkle_link(self.gentx_hash, self.merkle_link)
self.header = dict(self.min_header, merkle_root=merkle_root)
self.pow_hash = net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(self.header))
self.hash = self.header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(self.header))
if self.target > net.MAX_TARGET:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share target invalid')
if self.pow_hash > self.target:
from p2pool import p2p
raise p2p.PeerMisbehavingError('share PoW invalid')
self.new_transaction_hashes = self.share_info['new_transaction_hashes']
# XXX eww
self.time_seen = time.time()
def __repr__(self):
return 'Share' + repr((self.net, self.peer_addr, self.contents))
def as_share(self):
return dict(type=self.VERSION, contents=self.share_type.pack(self.contents))
def iter_transaction_hash_refs(self):
return zip(self.share_info['transaction_hash_refs'][::2], self.share_info['transaction_hash_refs'][1::2])
def check(self, tracker):
from p2pool import p2p
if self.share_data['previous_share_hash'] is not None:
previous_share = tracker.items[self.share_data['previous_share_hash']]
if type(self) is type(previous_share):
pass
elif type(self) is type(previous_share).SUCCESSOR:
if tracker.get_height(previous_share.hash) < self.net.CHAIN_LENGTH:
from p2pool import p2p
raise p2p.PeerMisbehavingError('switch without enough history')
# switch only valid if 85% of hashes in [self.net.CHAIN_LENGTH*9//10, self.net.CHAIN_LENGTH] for new version
counts = get_desired_version_counts(tracker,
tracker.get_nth_parent_hash(previous_share.hash, self.net.CHAIN_LENGTH*9//10), self.net.CHAIN_LENGTH//10)
if counts.get(self.VERSION, 0) < sum(counts.itervalues())*85//100:
raise p2p.PeerMisbehavingError('switch without enough hash power upgraded')
else:
raise p2p.PeerMisbehavingError('''%s can't follow %s''' % (type(self).__name__, type(previous_share).__name__))
other_tx_hashes = [tracker.items[tracker.get_nth_parent_hash(self.hash, share_count)].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
share_info, gentx, other_tx_hashes2, get_share = self.generate_transaction(tracker, self.share_info['share_data'], self.header['bits'].target, self.share_info['timestamp'], self.share_info['bits'].target, self.contents['ref_merkle_link'], [(h, None) for h in other_tx_hashes], self.net, last_txout_nonce=self.contents['last_txout_nonce'])
assert other_tx_hashes2 == other_tx_hashes
if share_info != self.share_info:
raise ValueError('share_info invalid')
if bitcoin_data.hash256(bitcoin_data.tx_type.pack(gentx)) != self.gentx_hash:
raise ValueError('''gentx doesn't match hash_link''')
if bitcoin_data.calculate_merkle_link([None] + other_tx_hashes, 0) != self.merkle_link:
raise ValueError('merkle_link and other_tx_hashes do not match')
return gentx # only used by as_block
def get_other_tx_hashes(self, tracker):
parents_needed = max(share_count for share_count, tx_count in self.iter_transaction_hash_refs()) if self.share_info['transaction_hash_refs'] else 0
parents = tracker.get_height(self.hash) - 1
if parents < parents_needed:
return None
last_shares = list(tracker.get_chain(self.hash, parents_needed + 1))
return [last_shares[share_count].share_info['new_transaction_hashes'][tx_count] for share_count, tx_count in self.iter_transaction_hash_refs()]
def _get_other_txs(self, tracker, known_txs):
other_tx_hashes = self.get_other_tx_hashes(tracker)
if other_tx_hashes is None:
return None # not all parents present
if not all(tx_hash in known_txs for tx_hash in other_tx_hashes):
return None # not all txs present
return [known_txs[tx_hash] for tx_hash in other_tx_hashes]
def should_punish_reason(self, previous_block, bits, tracker, known_txs):
if (self.header['previous_block'], self.header['bits']) != (previous_block, bits) and self.header_hash != previous_block and self.peer_addr is not None:
return True, 'Block-stale detected! %x < %x' % (self.header['previous_block'], previous_block)
if self.pow_hash <= self.header['bits'].target:
return -1, 'block solution'
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
if self.time_seen != 0: # ignore if loaded from ShareStore
return True, 'not all txs present'
else:
all_txs_size = sum(bitcoin_data.tx_type.packed_size(tx) for tx in other_txs)
if all_txs_size > 1000000:
return True, 'txs over block size limit'
new_txs_size = sum(bitcoin_data.tx_type.packed_size(known_txs[tx_hash]) for tx_hash in self.share_info['new_transaction_hashes'])
if new_txs_size > 50000:
return True, 'new txs over limit'
return False, None
def as_block(self, tracker, known_txs):
other_txs = self._get_other_txs(tracker, known_txs)
if other_txs is None:
return None # not all txs present
return dict(header=self.header, txs=[self.check(tracker)] + other_txs)
class WeightsSkipList(forest.TrackerSkipList):
    # share_count, weights, total_weight, total_donation_weight
def get_delta(self, element):
from p2pool.bitcoin import data as bitcoin_data
share = self.tracker.items[element]
att = bitcoin_data.target_to_average_attempts(share.target)
return 1, {share.new_script: att*(65535-share.share_data['donation'])}, att*65535, att*share.share_data['donation']
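    # Example (illustrative numbers): a share worth att = 1000 attempts with
    # share_data['donation'] = 655 (~1%) contributes 1000*(65535-655) to its
    # script's weight, 1000*65535 to the total weight and 1000*655 to the
    # donation weight; combine_deltas() then sums these per-share deltas.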
def combine_deltas(self, (share_count1, weights1, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2)):
return share_count1 + share_count2, math.add_dicts(weights1, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
def initial_solution(self, start, (max_shares, desired_weight)):
assert desired_weight % 65535 == 0, divmod(desired_weight, 65535)
return 0, None, 0, 0
def apply_delta(self, (share_count1, weights_list, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2), (max_shares, desired_weight)):
if total_weight1 + total_weight2 > desired_weight and share_count2 == 1:
assert (desired_weight - total_weight1) % 65535 == 0
script, = weights2.iterkeys()
new_weights = {script: (desired_weight - total_weight1)//65535*weights2[script]//(total_weight2//65535)}
return share_count1 + share_count2, (weights_list, new_weights), desired_weight, total_donation_weight1 + (desired_weight - total_weight1)//65535*total_donation_weight2//(total_weight2//65535)
return share_count1 + share_count2, (weights_list, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
def judge(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
if share_count > max_shares or total_weight > desired_weight:
return 1
elif share_count == max_shares or total_weight == desired_weight:
return 0
else:
return -1
def finalize(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
assert share_count <= max_shares and total_weight <= desired_weight
assert share_count == max_shares or total_weight == desired_weight
return math.add_dicts(*math.flatten_linked_list(weights_list)), total_weight, total_donation_weight
class OkayTracker(forest.Tracker):
def __init__(self, net):
forest.Tracker.__init__(self, delta_type=forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
work=lambda share: bitcoin_data.target_to_average_attempts(share.target),
min_work=lambda share: bitcoin_data.target_to_average_attempts(share.max_target),
)))
self.net = net
self.verified = forest.SubsetTracker(delta_type=forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
work=lambda share: bitcoin_data.target_to_average_attempts(share.target),
)), subset_of=self)
self.get_cumulative_weights = WeightsSkipList(self)
def attempt_verify(self, share):
if share.hash in self.verified.items:
return True
height, last = self.get_height_and_last(share.hash)
if height < self.net.CHAIN_LENGTH + 1 and last is not None:
raise AssertionError()
try:
share.check(self)
except:
log.err(None, 'Share check failed:')
return False
else:
self.verified.add(share)
return True
def think(self, block_rel_height_func, previous_block, bits, known_txs):
desired = set()
# O(len(self.heads))
# make 'unverified heads' set?
# for each overall head, attempt verification
# if it fails, attempt on parent, and repeat
# if no successful verification because of lack of parents, request parent
bads = set()
for head in set(self.heads) - set(self.verified.heads):
head_height, last = self.get_height_and_last(head)
for share in self.get_chain(head, head_height if last is None else min(5, max(0, head_height - self.net.CHAIN_LENGTH))):
if self.attempt_verify(share):
break
if share.hash in self.heads:
bads.add(share.hash)
else:
if last is not None:
desired.add((
self.items[random.choice(list(self.reverse[last]))].peer_addr,
last,
max(x.timestamp for x in self.get_chain(head, min(head_height, 5))),
min(x.target for x in self.get_chain(head, min(head_height, 5))),
))
for bad in bads:
assert bad not in self.verified.items
assert bad in self.heads
if p2pool.DEBUG:
print "BAD", bad
self.remove(bad)
# try to get at least CHAIN_LENGTH height for each verified head, requesting parents if needed
for head in list(self.verified.heads):
head_height, last_hash = self.verified.get_height_and_last(head)
last_height, last_last_hash = self.get_height_and_last(last_hash)
# XXX review boundary conditions
want = max(self.net.CHAIN_LENGTH - head_height, 0)
can = max(last_height - 1 - self.net.CHAIN_LENGTH, 0) if last_last_hash is not None else last_height
get = min(want, can)
#print 'Z', head_height, last_hash is None, last_height, last_last_hash is None, want, can, get
for share in self.get_chain(last_hash, get):
if not self.attempt_verify(share):
break
if head_height < self.net.CHAIN_LENGTH and last_last_hash is not None:
desired.add((
self.items[random.choice(list(self.verified.reverse[last_hash]))].peer_addr,
last_last_hash,
max(x.timestamp for x in self.get_chain(head, min(head_height, 5))),
min(x.target for x in self.get_chain(head, min(head_height, 5))),
))
# decide best tree
decorated_tails = sorted((self.score(max(self.verified.tails[tail_hash], key=self.verified.get_work), block_rel_height_func), tail_hash) for tail_hash in self.verified.tails)
if p2pool.DEBUG:
print len(decorated_tails), 'tails:'
for score, tail_hash in decorated_tails:
print format_hash(tail_hash), score
best_tail_score, best_tail = decorated_tails[-1] if decorated_tails else (None, None)
# decide best verified head
decorated_heads = sorted(((
self.verified.get_work(self.verified.get_nth_parent_hash(h, min(5, self.verified.get_height(h)))),
#self.items[h].peer_addr is None,
-self.items[h].should_punish_reason(previous_block, bits, self, known_txs)[0],
-self.items[h].time_seen,
), h) for h in self.verified.tails.get(best_tail, []))
if p2pool.DEBUG:
print len(decorated_heads), 'heads. Top 10:'
for score, head_hash in decorated_heads[-10:]:
print ' ', format_hash(head_hash), format_hash(self.items[head_hash].previous_hash), score
best_head_score, best = decorated_heads[-1] if decorated_heads else (None, None)
if best is not None:
best_share = self.items[best]
punish, punish_reason = best_share.should_punish_reason(previous_block, bits, self, known_txs)
if punish > 0:
print 'Punishing share for %r! Jumping from %s to %s!' % (punish_reason, format_hash(best), format_hash(best_share.previous_hash))
best = best_share.previous_hash
timestamp_cutoff = min(int(time.time()), best_share.timestamp) - 3600
target_cutoff = int(2**256//(self.net.SHARE_PERIOD*best_tail_score[1] + 1) * 2 + .5) if best_tail_score[1] is not None else 2**256-1
else:
timestamp_cutoff = int(time.time()) - 24*60*60
target_cutoff = 2**256-1
if p2pool.DEBUG:
print 'Desire %i shares. Cutoff: %s old diff>%.2f' % (len(desired), math.format_dt(time.time() - timestamp_cutoff), bitcoin_data.target_to_difficulty(target_cutoff))
for peer_addr, hash, ts, targ in desired:
print ' ', None if peer_addr is None else '%s:%i' % peer_addr, format_hash(hash), math.format_dt(time.time() - ts), bitcoin_data.target_to_difficulty(targ), ts >= timestamp_cutoff, targ <= target_cutoff
return best, [(peer_addr, hash) for peer_addr, hash, ts, targ in desired if ts >= timestamp_cutoff], decorated_heads
def score(self, share_hash, block_rel_height_func):
# returns approximate lower bound on chain's hashrate in the last self.net.CHAIN_LENGTH*15//16*self.net.SHARE_PERIOD time
head_height = self.verified.get_height(share_hash)
if head_height < self.net.CHAIN_LENGTH:
return head_height, None
end_point = self.verified.get_nth_parent_hash(share_hash, self.net.CHAIN_LENGTH*15//16)
block_height = max(block_rel_height_func(share.header['previous_block']) for share in
self.verified.get_chain(end_point, self.net.CHAIN_LENGTH//16))
return self.net.CHAIN_LENGTH, self.verified.get_delta(share_hash, end_point).work/((0 - block_height + 1)*self.net.PARENT.BLOCK_PERIOD)
def get_pool_attempts_per_second(tracker, previous_share_hash, dist, min_work=False, integer=False):
assert dist >= 2
near = tracker.items[previous_share_hash]
far = tracker.items[tracker.get_nth_parent_hash(previous_share_hash, dist - 1)]
attempts = tracker.get_delta(near.hash, far.hash).work if not min_work else tracker.get_delta(near.hash, far.hash).min_work
time = near.timestamp - far.timestamp
if time <= 0:
time = 1
if integer:
return attempts//time
return attempts/time
def get_average_stale_prop(tracker, share_hash, lookbehind):
stales = sum(1 for share in tracker.get_chain(share_hash, lookbehind) if share.share_data['stale_info'] is not None)
return stales/(lookbehind + stales)
def get_stale_counts(tracker, share_hash, lookbehind, rates=False):
res = {}
for share in tracker.get_chain(share_hash, lookbehind - 1):
res['good'] = res.get('good', 0) + bitcoin_data.target_to_average_attempts(share.target)
s = share.share_data['stale_info']
if s is not None:
res[s] = res.get(s, 0) + bitcoin_data.target_to_average_attempts(share.target)
if rates:
dt = tracker.items[share_hash].timestamp - tracker.items[tracker.get_nth_parent_hash(share_hash, lookbehind - 1)].timestamp
res = dict((k, v/dt) for k, v in res.iteritems())
return res
def get_user_stale_props(tracker, share_hash, lookbehind):
res = {}
for share in tracker.get_chain(share_hash, lookbehind - 1):
stale, total = res.get(share.share_data['pubkey_hash'], (0, 0))
total += 1
if share.share_data['stale_info'] is not None:
stale += 1
total += 1
res[share.share_data['pubkey_hash']] = stale, total
return dict((pubkey_hash, stale/total) for pubkey_hash, (stale, total) in res.iteritems())
def get_expected_payouts(tracker, best_share_hash, block_target, subsidy, net):
weights, total_weight, donation_weight = tracker.get_cumulative_weights(best_share_hash, min(tracker.get_height(best_share_hash), net.REAL_CHAIN_LENGTH), 65535*net.SPREAD*bitcoin_data.target_to_average_attempts(block_target))
res = dict((script, subsidy*weight//total_weight) for script, weight in weights.iteritems())
res[DONATION_SCRIPT] = res.get(DONATION_SCRIPT, 0) + subsidy - sum(res.itervalues())
return res
def get_desired_version_counts(tracker, best_share_hash, dist):
res = {}
for share in tracker.get_chain(best_share_hash, dist):
res[share.desired_version] = res.get(share.desired_version, 0) + bitcoin_data.target_to_average_attempts(share.target)
return res
def get_warnings(tracker, best_share, net, bitcoind_warning, bitcoind_work_value):
res = []
desired_version_counts = get_desired_version_counts(tracker, best_share,
min(net.CHAIN_LENGTH, 60*60//net.SHARE_PERIOD, tracker.get_height(best_share)))
majority_desired_version = max(desired_version_counts, key=lambda k: desired_version_counts[k])
if majority_desired_version > (Share.SUCCESSOR if Share.SUCCESSOR is not None else Share).VOTING_VERSION and desired_version_counts[majority_desired_version] > sum(desired_version_counts.itervalues())/2:
res.append('A MAJORITY OF SHARES CONTAIN A VOTE FOR AN UNSUPPORTED SHARE IMPLEMENTATION! (v%i with %i%% support)\n'
'An upgrade is likely necessary. Check http://p2pool.forre.st/ for more information.' % (
majority_desired_version, 100*desired_version_counts[majority_desired_version]/sum(desired_version_counts.itervalues())))
if bitcoind_warning is not None:
if 'This is a pre-release test build' not in bitcoind_warning:
res.append('(from bitcoind) %s' % (bitcoind_warning,))
if time.time() > bitcoind_work_value['last_update'] + 60:
res.append('''LOST CONTACT WITH BITCOIND for %s! Check that it isn't frozen or dead!''' % (math.format_dt(time.time() - bitcoind_work_value['last_update']),))
return res
def format_hash(x):
if x is None:
return 'xxxxxxxx'
return '%08x' % (x % 2**32)
class ShareStore(object):
def __init__(self, prefix, net):
self.filename = prefix
self.dirname = os.path.dirname(os.path.abspath(prefix))
self.filename = os.path.basename(os.path.abspath(prefix))
self.net = net
self.known = None # will be filename -> set of share hashes, set of verified hashes
self.known_desired = None
def get_shares(self):
if self.known is not None:
raise AssertionError()
known = {}
filenames, next = self.get_filenames_and_next()
for filename in filenames:
share_hashes, verified_hashes = known.setdefault(filename, (set(), set()))
with open(filename, 'rb') as f:
for line in f:
try:
type_id_str, data_hex = line.strip().split(' ')
type_id = int(type_id_str)
if type_id == 0:
pass
elif type_id == 1:
pass
elif type_id == 2:
verified_hash = int(data_hex, 16)
yield 'verified_hash', verified_hash
verified_hashes.add(verified_hash)
elif type_id == 5:
raw_share = share_type.unpack(data_hex.decode('hex'))
if raw_share['type'] in [0, 1, 2, 3, 4, 5, 6, 7, 8]:
continue
share = load_share(raw_share, self.net, None)
yield 'share', share
share_hashes.add(share.hash)
else:
raise NotImplementedError("share type %i" % (type_id,))
except Exception:
log.err(None, "HARMLESS error while reading saved shares, continuing where left off:")
self.known = known
self.known_desired = dict((k, (set(a), set(b))) for k, (a, b) in known.iteritems())
def _add_line(self, line):
filenames, next = self.get_filenames_and_next()
if filenames and os.path.getsize(filenames[-1]) < 10e6:
filename = filenames[-1]
else:
filename = next
with open(filename, 'ab') as f:
f.write(line + '\n')
return filename
def add_share(self, share):
for filename, (share_hashes, verified_hashes) in self.known.iteritems():
if share.hash in share_hashes:
break
else:
filename = self._add_line("%i %s" % (5, share_type.pack(share.as_share()).encode('hex')))
share_hashes, verified_hashes = self.known.setdefault(filename, (set(), set()))
share_hashes.add(share.hash)
share_hashes, verified_hashes = self.known_desired.setdefault(filename, (set(), set()))
share_hashes.add(share.hash)
def add_verified_hash(self, share_hash):
for filename, (share_hashes, verified_hashes) in self.known.iteritems():
if share_hash in verified_hashes:
break
else:
filename = self._add_line("%i %x" % (2, share_hash))
share_hashes, verified_hashes = self.known.setdefault(filename, (set(), set()))
verified_hashes.add(share_hash)
share_hashes, verified_hashes = self.known_desired.setdefault(filename, (set(), set()))
verified_hashes.add(share_hash)
def get_filenames_and_next(self):
suffixes = sorted(int(x[len(self.filename):]) for x in os.listdir(self.dirname) if x.startswith(self.filename) and x[len(self.filename):].isdigit())
return [os.path.join(self.dirname, self.filename + str(suffix)) for suffix in suffixes], os.path.join(self.dirname, self.filename + (str(suffixes[-1] + 1) if suffixes else str(0)))
def forget_share(self, share_hash):
for filename, (share_hashes, verified_hashes) in self.known_desired.iteritems():
if share_hash in share_hashes:
share_hashes.remove(share_hash)
self.check_remove()
def forget_verified_share(self, share_hash):
for filename, (share_hashes, verified_hashes) in self.known_desired.iteritems():
if share_hash in verified_hashes:
verified_hashes.remove(share_hash)
self.check_remove()
def check_remove(self):
to_remove = set()
for filename, (share_hashes, verified_hashes) in self.known_desired.iteritems():
#print filename, len(share_hashes) + len(verified_hashes)
if not share_hashes and not verified_hashes:
to_remove.add(filename)
for filename in to_remove:
self.known.pop(filename)
self.known_desired.pop(filename)
os.remove(filename)
print "REMOVED", filename
| gpl-3.0 | 4,844,746,086,630,213,000 | 52.386603 | 346 | 0.613365 | false |
perfsonar/pscheduler | pscheduler-tool-twping/twping/twping_utils.py | 1 | 1200 | ###
# utilities used by twping command
#
from twping_defaults import *
import configparser
import pscheduler
#Role constants
CLIENT_ROLE = 0
SERVER_ROLE = 1
log = pscheduler.Log(prefix="tool-twping", quiet=True)
##
# Determine whether particpant will act as client or server
def get_role(participant, test_spec):
# #Uncomment this when we do multi-participant
# role = None
# flip = test_spec.get('flip', False)
# single_participant_mode = test_spec.get('single-participant-mode', False)
# if participant == 0:
# if single_participant_mode:
# role = CLIENT_ROLE
# elif flip:
# role = SERVER_ROLE
# else:
# role = CLIENT_ROLE
# elif participant == 1:
# if flip:
# role = CLIENT_ROLE
# else:
# role = SERVER_ROLE
# else:
# pscheduler.fail("Invalid participant.")
return CLIENT_ROLE
##
# Open config file
def get_config():
config = None
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
except:
        log.warning("Unable to read configuration file %s. Proceeding with defaults." % CONFIG_FILE)
return config
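# Usage sketch (the section and option names below are illustrative; the real
# ones come from the tool's configuration file):
#
#     config = get_config()
#     if config is not None and config.has_option('twping', 'twping_cmd'):
#         twping_cmd = config.get('twping', 'twping_cmd')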
| apache-2.0 | -232,647,965,472,448,130 | 23.489796 | 86 | 0.610833 | false |
andresmrm/rss2email | rss2email/__init__.py | 1 | 1661 | # Copyright (C) 2012-2013 W. Trevor King <[email protected]>
#
# This file is part of rss2email.
#
# rss2email is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) version 3 of
# the License.
#
# rss2email is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# rss2email. If not, see <http://www.gnu.org/licenses/>.
"""rss2email: get RSS feeds emailed to you
"""
import logging as _logging
import sys as _sys
__version__ = '3.6'
__url__ = 'https://github.com/wking/rss2email'
__author__ = 'W. Trevor King'
__email__ = '[email protected]'
__copyright__ = '(C) 2004 Aaron Swartz. GNU GPL 2 or 3.'
__contributors__ = [
'Aaron Swartz (original author)',
'Brian Lalor',
'Dean Jackson',
'Eelis van der Weegen',
'Erik Hetzner',
'Etienne Millon',
'George Saunders',
'Joey Hess',
'Lindsey Smith ([email protected])',
'Marcel Ackermann (http://www.DreamFlasher.de)',
"Martin 'Joey' Schulze",
'Matej Cepl',
'W. Trevor King',
]
LOG = _logging.getLogger('rss2email')
LOG.addHandler(_logging.StreamHandler())
LOG.setLevel(_logging.ERROR)
if _sys.version_info < (3, 2):
raise ImportError(
"rss2email requires Python 3.2, but you're using:\n{}".format(
_sys.version))
| gpl-2.0 | -7,075,809,744,930,004,000 | 30.339623 | 79 | 0.684527 | false |
carborgar/metropol | metropol_abogados/views/ExpedientViews.py | 1 | 3817 | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import permission_required
from django.views import generic
from django.db.models import Q
from metropol_abogados.services import ExpedientService
from metropol_abogados.models import Expedient
from metropol_abogados.forms import ExpedientForm, ExpedientListFilterForm
def get_redirect(request, expedient_id):
msg = "Expediente %s correctamente" % ("guardado" if not expedient_id else "editado")
messages.success(request, msg)
if expedient_id:
return HttpResponseRedirect(reverse('expedient-details', args=(expedient_id,)))
else:
return HttpResponseRedirect(reverse('expedient-list'))
@permission_required('auth.management_metropol')
def edit(request, expedient_id=None):
if request.method == 'POST':
form = ExpedientForm(request.POST)
if form.is_valid():
ExpedientService.save_from_form(form)
return get_redirect(request, expedient_id)
else:
initial_data = {'expedient_num': Expedient.objects.latest().id + 1}
if expedient_id:
expedient = get_object_or_404(Expedient, id=expedient_id)
initial_data = ExpedientService.build_initial_data(expedient)
form = ExpedientForm(initial=initial_data)
return render_to_response("expedient/edit.html", {'form': form}, context_instance=RequestContext(request))
@permission_required('auth.management_metropol')
def expedient_list(request):
form = ExpedientListFilterForm(request.GET)
expedients = ExpedientService.find_all()
if form.is_valid():
search_term = form.cleaned_data['keyword'] or None
selected_branch_id = form.cleaned_data['branch'] or None
selected_headquarters_id = form.cleaned_data['headquarters'] or None
selected_state = form.cleaned_data['state'] or None
selected_customers = form.cleaned_data['customers'] or None
if search_term:
expedients = expedients.filter(Q(id__icontains=search_term) | Q(description__icontains=search_term))
# Remember -> -1 equals "without" and None is "all"
if selected_branch_id:
if selected_branch_id == '-1':
# Filter expedients without branch
expedients = expedients.filter(phase__isnull=True)
else:
expedients = expedients.filter(phase__law_branch__id=selected_branch_id)
if selected_headquarters_id:
if selected_headquarters_id == '-1':
# Filter expedients without headquarters
expedients = expedients.filter(headquarters__isnull=True)
else:
expedients = expedients.filter(headquarters__id=selected_headquarters_id)
if selected_state:
expedients = expedients.filter(state=selected_state)
if selected_customers:
expedients = expedients.filter(expperrol__person__in=selected_customers, expperrol__role__text_help__iexact='CLIENTE').distinct()
return render_to_response("expedient/list.html", {"expedients": expedients, 'filter_form': form}, context_instance=RequestContext(request))
class DetailsView(generic.DetailView):
model = Expedient
template_name = 'expedient/details.html'
@permission_required('auth.management_metropol')
def delete(request, expedient_id):
expedient = get_object_or_404(Expedient, id=expedient_id)
expedient.delete()
messages.success(request, "Se ha borrado el expediente correctamente.")
return HttpResponseRedirect(reverse('expedient-list'))
| mit | -1,160,492,237,735,441,200 | 39.178947 | 143 | 0.700812 | false |
ibamacsr/indicar-process | indicarprocess/tmsapi/views.py | 2 | 1134 | from rest_framework.generics import ListAPIView, RetrieveAPIView
from catalogo.models import CatalogoLandsat, CatalogoRapidEye
from .serializers import LandsatSerializer, RapidEyeSerializer
class LandsatListAPI(ListAPIView):
serializer_class = LandsatSerializer
def get_queryset(self):
bbox = self.request.query_params.get('extent', None)
if bbox:
return CatalogoLandsat.objects.filter(geom__intersects=bbox).order_by('data')
else:
return []
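# Usage sketch (illustrative request; the URL prefix depends on the project's
# routing, and 'extent' may be any geometry value accepted by GeoDjango's
# __intersects lookup, e.g. a WKT polygon):
#
#     GET /landsat/?extent=POLYGON((-60 -10,-59 -10,-59 -9,-60 -9,-60 -10))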
class RapidEyeListAPI(ListAPIView):
serializer_class = RapidEyeSerializer
def get_queryset(self):
bbox = self.request.query_params.get('extent', None)
if bbox:
return CatalogoRapidEye.objects.filter(geom__intersects=bbox).order_by('data')
else:
return []
class LandsatDetailView(RetrieveAPIView):
queryset = CatalogoLandsat.objects.all()
serializer_class = LandsatSerializer
lookup_field = 'image'
class RapidEyeDetailView(RetrieveAPIView):
queryset = CatalogoRapidEye.objects.all()
serializer_class = RapidEyeSerializer
lookup_field = 'image'
| agpl-3.0 | 7,511,570,201,475,237,000 | 28.076923 | 90 | 0.710758 | false |
pauloacmelo/papelex_winthor | 9813_ui_examples.py | 1 | 3575 | # coding: utf-8
from base import *
from PySide import QtGui, QtCore
import requests
import json
import urllib2
class Routine9812(WinthorRoutine):
def __init__(self, *args):
# super(WinthorRoutine, self).__init__('TESTE')
print(args)
super(Routine9812, self).__init__(args[4] or 9812, u'Cálculo de Frete', *args)
self.initUI()
def initUI(self):
super(Routine9812, self).initUI()
# saself.form = QFormLayout(self)
textInput = QtGui.QLineEdit(self)
self.mainwindow.addWidget(textInput)
combo = QtGui.QComboBox(self)
self.mainwindow.addWidget(combo)
combo.addItem(u'Opção 1', combo)
combo.addItem(u'Opção 2', combo)
but = QtGui.QPushButton('TEST', self)
but.clicked.connect(self.buttonAction)
self.mainwindow.addWidget(but)
table_view = QtGui.QTableView(self)
header = [u'Transportadora', u'Preço', u'Cubagem', u'Prazo']
data = [
['1, 1', '1, 2', '1, 3'],
['2, 1', '2, 2', '2, 3'],
['3, 1', '3, 2', '3, 3'],]
table_view.setModel(QTableModel(self, data, header))
self.mainwindow.addWidget(table_view)
def buttonAction(self):
print self.db.query('select CODPROD from PCPEDI where NUMPED = %s' % 224010951)
    def quote_order_shipping(self, order_id):
        # TODO: fetch the order's destination zip code and items for order_id,
        # then call self.quotation(destination_zip_code, products) with them.
        self.quotation()
# destination_zip_code example: '20756-200'
# products example: [{"weight": 2.1,"cost_of_goods": 101.23,"width": 13,"height": 10,"length": 10,"quantity": 1,"sku_id": "1","description": "descrição do item","can_group": "true"}]
    def quotation(self, destination_zip_code, products):
data = {
"origin_zip_code": "21010-410",
"destination_zip_code": destination_zip_code,
"products": products,
"quoting_mode": "DYNAMIC_BOX_ALL_ITEMS",
"additional_information": {
"free_shipping": False,
"extra_cost_absolute": 0,
"extra_cost_percentage": 0,
"lead_time_business_days": 0,
"sales_channel": "hotsite",
"tax_id": "22337462000127",
"client_type": "gold",
"payment_type": "",
"is_state_tax_payer": False,
"delivery_method_ids": []
},
"identification": {
"session": "04e5bdf7ed15e571c0265c18333b6fdf1434658753",
"page_name": "carrinho",
"ip": "000.000.000.000",
"url": "http://www.intelipost.com.br/checkout/cart/"
}
}
req = urllib2.Request('https://api.intelipost.com.br/api/v1/quote_by_product', json.dumps(data))
req.add_header('Content-Type', 'application/json')
req.add_header('api_key', '36a3fa0d4108231864a60988a15272b9fd692c3320206ceb3e85e61688e11d79')
res = urllib2.urlopen(req)
return json.loads(res.read())
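    # Usage sketch (values are illustrative only; see the parameter examples
    # in the comments above quotation()):
    #
    #     result = self.quotation('20756-200', [{
    #         "weight": 2.1, "cost_of_goods": 101.23, "width": 13,
    #         "height": 10, "length": 10, "quantity": 1, "sku_id": "1",
    #         "description": u"descrição do item", "can_group": True,
    #     }])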
class ErrorMessage(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
QtGui.QMessageBox.critical(self,
"Erro!",
"Utilize a rotina a partir do menu.")
self.close()
# Expected call: routine.exe USER DB_PASS DB_ALIAS DB_USER ROUTINE_NUMBER
def main(args):
app = QtGui.QApplication([])
if len(args) != 6:
print('Erro! Número de parâmetros diferente do esperado.')
print('Esperado: 6. Encontrado: %s' % len(args))
ErrorMessage()
return
args = args[1:]
ex = Routine9812(*args)
sys.exit(app.exec_())
if __name__ == '__main__':
main(sys.argv)
| mit | 4,274,113,163,035,480,000 | 35.010101 | 186 | 0.580084 | false |
delijati/pysimiam-simulator | scripts/ui.py | 1 | 9774 | try:
import Queue as queue
except ImportError:
import queue
from helpers import Struct
class uiParameter(Struct):
"""uiParameter represents a single GUI element that is used to build a parameter window
in the UI (simulator event "make_param_window").
It has one parameter, ``type``, that defines the type of the parameter. Possible parameter
types are GROUP, INT, FLOAT, BOOL and SELECT.
"""
GROUP, INT, FLOAT, BOOL, SELECT = 0, 1, 2, 3, 4
def __init__(self, elem_type):
self.type = elem_type
class uiGroup(uiParameter):
def __init__(self, contents):
        uiParameter.__init__(self, uiParameter.GROUP)
self.contents = contents
class uiInt(uiParameter):
def __init__(self, value, min_value=-100, max_value=100):
uiParameter.__init__(self, uiParameter.INT)
self.value = value
self.min_value = min_value
self.max_value = max_value
class uiFloat(uiParameter):
def __init__(self, value, step=1.0, min_value=-1000.0, max_value=1000.0):
uiParameter.__init__(self, uiParameter.FLOAT)
self.value = value
self.step = step
self.min_value = min_value
self.max_value = max_value
class uiBool(uiParameter):
def __init__(self, value):
uiParameter.__init__(self, uiParameter.BOOL)
self.value = value
class uiSelect(uiParameter):
def __init__(self, value, value_list):
        uiParameter.__init__(self, uiParameter.SELECT)
self.value = value
self.value_list = value_list
class SimUI(object):
"""The SimUI class defines a front-end for the :class:`~simulator.Simulator`.
It contains the necessary functions for the frontend-simulator
communication and stubs for the message callbacks.
This class manages three important objects:
* The simulator, as ``self.simulator_thread``
* The incoming simulator events, as ``self.in_queue``
* The outgoing simulator commands, as ``self.sim_queue``
The constructor of SimUI takes a :class:`~renderer.Renderer` object as
parameter. This renderer will be passed to the simulator to draw on.
"""
def __init__(self, renderer, simulator_class):
self.event_handler = None
self.sim_queue = queue.Queue()
# create the simulator thread
self.simulator_thread = simulator_class(renderer, self.sim_queue)
self.in_queue = self.simulator_thread._out_queue
self.simulator_thread.start()
def register_event_handler(self, event_handler):
"""Register a callback that will be executed to process the
"""
self.event_handler = event_handler
def unregister_event_handler(self):
"""Unregister a previously registered event handler.
"""
self.event_handler = None
def process_events(self, process_all=False):
"""Processes one or all incoming events from the simulator. A single
event is a tuple (name,args). During the processing of the event,
the function ``simulator_``\ *name* will be called with args as parameters.
It is strongly discouraged to create new class methods with the name
starting with `simulator_`. Such functions could be called from
the simulator without your consent.
Unknown or malformed events will lead to an error message printed
to the console.
"""
while not self.in_queue.empty():
tpl = self.in_queue.get()
if isinstance(tpl, tuple) and len(tpl) == 2:
name, args = tpl
intercepted = False
if self.event_handler is not None:
intercepted = self.event_handler(name, args)
if not intercepted:
# Scramble
name = "simulator_{}".format(name)
if name in self.__class__.__dict__:
try:
self.__class__.__dict__[name](self, *args)
except TypeError:
print(
"Wrong UI event parameters {}{}".format(
name,
args))
raise
else:
print("Unknown UI event '{}'".format(name))
else:
print("Wrong UI event format '{}'".format(tpl))
self.in_queue.task_done()
if not process_all:
return
def run_simulator_command(self, command, *args):
"""Sends the command *command* to the simulator. All arguments after
*command* are passed to the command processing function on the
simulator side.
See :class:`~simulator.Simulator` for the available commands.
"""
self.sim_queue.put((command, args))
# Simulator processing functions : stubs
def simulator_make_param_window(self, robot_id, name, parameters):
"""A request from the supervisor to create a parameter window.
*robot_id* is guaranteed to uniquely identify a robot in a
simulation. Currently, *robot_id* is the actual robot object.
It can be used e.g. to extract the color of the robot as
``robot_id.get_color()``. *name* is the desired window name, and
*parameters* is the structure
returned by :meth:`~supervisor.Supervisor.get_ui_description`.
"""
raise NotImplementedError('SimUI.simulator_make_param_window')
def simulator_running(self):
"""A notification that the simulation has been started."""
raise NotImplementedError('SimUI.simulator_running')
def simulator_paused(self):
"""A notification that the simulation has been paused."""
raise NotImplementedError('SimUI.simulator_paused')
def simulator_reset(self):
"""A notification that the simulation has been reset."""
raise NotImplementedError('SimUI.simulator_reset')
def simulator_stopped(self):
"""A notification that the simulation has been stopped."""
raise NotImplementedError('SimUI.simulator_stopped')
def simulator_update_view(self):
"""A request to redraw the simulation window. This notification
signifies that the simulation has stopped using the renderer,
and is waiting for the UI to process this event.
The simulation will be resumed after this function exits.
"""
raise NotImplementedError('SimUI.simulator_update_view')
def simulator_exception(self, e_type, e_value, e_traceback):
"""An exception was raised in the simulator thread in the attempt
to process an incoming command.
"""
raise NotImplementedError('SimUI.simulator_exception')
def simulator_log(self, message, objclass, objcolor):
"""A log *message* was generated by one of the simulation objects
of class *objclass*. The *objcolor* is the color of the simobject,
in the case the object is connected to one, and None otherwise.
"""
raise NotImplementedError('SimUI.simulator_log')
# Commands for the tester:
def run_simulation(self):
"""Unpause the simulation."""
self.run_simulator_command('start_simulation')
def pause_simulation(self):
"""Pause the simulation."""
self.run_simulator_command('pause_simulation')
def step_simulation(self):
"""Advance the simulation one step if it is paused."""
self.run_simulator_command('step_simulation')
def start_testing(self):
"""Prepare the simulation environment for testing, e.g. disable
user controls of the simulation progress."""
pass
def stop_testing(self):
"""Return UI back to normal operation."""
pass
# def get_view_parameters(self):
# pass
# def set_view_parameters(self,params):
# pass
# def new_renderer(self):
# pass
# def pop_renderer(self):
# pass
# def start_test(self):
#"""This function will pause and 'cache' the currently running
# simulation. A new `simulator.Simulator` will be started with
# the control belonging to the tester object.
#"""
#self.antiteststruct = Struct()
#self.antiteststruct.wasrunning = False
# 1) Pause simulator
# if self.simulator_thread.is_running():
# self.antiteststruct.wasrunning = True # Remember the setting
# self.run_simulator_command('pause_simulation') # Pause simulation
# self.process_events(True) # Process all events
# 2) Create new simulator
#self.antiteststruct.simulator = simulator_thread
#self.simulator_thread = sim.Simulator(self.instantiate_new_renderer(), self.sim_queue)
# self.simulator_thread.start()
# def stop_test(self):
#"""This function will restore the cached simulation and
# simulation. A new `simulator.Simulator` will be started with
# the control belonging to the tester object.
#"""
#view_params = self.get_view_parameters()
# 1) Stop simulator
# self.run_simulator_command('stop')
# while self.simulator_thread.isAlive():
# self.process_events(True)
# self.simulator_thread.join(0.1)
# 2) Switch to old simulator
# self.pop_renderer()
#self.simulator_thread = self.antiteststruct.simulator
# 3) continue running
# if self.antiteststruct.wasrunning:
# self.run_simulator_command('pause_simulation')
| gpl-2.0 | 6,235,265,488,941,697,000 | 34.933824 | 97 | 0.613669 | false |
jrversteegh/softsailor | softsailor/tst/test_router.py | 1 | 1177 | import unittest
import testing_helper
from softsailor.chart import Chart
from softsailor.course import Course
from softsailor.boat import SailBoat
from softsailor.router import *
from geofun import Position
class TestRouter(unittest.TestCase):
def testConstruction(self):
boat = SailBoat()
course = Course()
chart = Chart()
router = Router(boat=boat, course=course, chart=chart)
router.construct_legs()
self.assertEqual(1, len(router.legs))
def testValidRoute(self):
p1 = Position(0.9, 0.9)
p2 = Position(1.0, 1.0)
p3 = Position(1.0, 1.1)
pt1 = Position(0.99, 1.0)
pt2 = Position(1.01, 1.0)
r1 = Route((p1, pt1, p3))
r2 = Route((p1, pt2, p3))
c = Course((p1, p2, p3))
boat = SailBoat()
chart = Chart()
router = Router(boat=boat, course=c, chart=chart)
self.assertFalse(router.valid_route(r1))
self.assertTrue(router.valid_route(r2))
c.marks[0].to_port = True
self.assertTrue(router.valid_route(r1))
self.assertFalse(router.valid_route(r2))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,131,128,523,549,602,000 | 28.425 | 62 | 0.610875 | false |
eviljeff/zamboni | mkt/search/tests/test_filters.py | 1 | 15526 | # -*- coding: utf-8 -*-
import json
from nose.tools import eq_, ok_
from rest_framework.exceptions import ParseError
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.test.utils import override_settings
import mkt
from mkt.constants.applications import DEVICE_CHOICES_IDS
from mkt.constants.features import FeatureProfile
from mkt.search.filters import (DeviceTypeFilter, ProfileFilter,
PublicAppsFilter, PublicSearchFormFilter,
RegionFilter, SearchQueryFilter, SortingFilter,
ValidAppsFilter)
from mkt.search.forms import TARAKO_CATEGORIES_MAPPING
from mkt.search.views import SearchView
from mkt.site.tests import TestCase
from mkt.webapps.indexers import WebappIndexer
class FilterTestsBase(TestCase):
def setUp(self):
super(FilterTestsBase, self).setUp()
self.req = RequestFactory().get('/')
self.req.user = AnonymousUser()
self.view_class = SearchView
def _filter(self, req=None, data=None):
req = req or RequestFactory().get('/', data=data or {})
req.user = AnonymousUser()
queryset = WebappIndexer.search()
for filter_class in self.filter_classes:
queryset = filter_class().filter_queryset(req, queryset,
self.view_class)
return queryset.to_dict()
class TestQueryFilter(FilterTestsBase):
filter_classes = [SearchQueryFilter]
def test_q(self):
qs = self._filter(data={'q': 'search terms'})
# Spot check a few queries.
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'match': {'name': {'query': 'search terms', 'boost': 4,
'slop': 1, 'type': 'phrase'}}}
in should)
ok_({'prefix': {'name': {'boost': 1.5, 'value': 'search terms'}}}
in should)
ok_({'match': {'name_l10n_english': {'query': 'search terms',
'boost': 2.5}}}
in should)
ok_({'match': {'description_l10n_english':
{'query': 'search terms',
'boost': 0.6,
'analyzer': 'english_analyzer',
'type': 'phrase'}}} in should)
def test_fuzzy_single_word(self):
qs = self._filter(data={'q': 'term'})
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'fuzzy': {'tags': {'prefix_length': 1, 'value': 'term'}}}
in should)
def test_no_fuzzy_multi_word(self):
qs = self._filter(data={'q': 'search terms'})
qs_str = json.dumps(qs)
ok_('fuzzy' not in qs_str)
@override_settings(ES_USE_PLUGINS=True)
def test_polish_analyzer(self):
"""
Test that the polish analyzer is included correctly since it is an
exception to the rest b/c it is a plugin.
"""
with self.activate(locale='pl'):
qs = self._filter(data={'q': u'próba'})
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'match': {'name_l10n_polish': {'query': u'pr\xf3ba',
'boost': 2.5}}}
in should)
ok_({'match': {'description_l10n_polish': {'query': u'pr\xf3ba',
'boost': 0.6,
'analyzer': 'polish',
'type': 'phrase'}}}
in should)
class TestFormFilter(FilterTestsBase):
filter_classes = [PublicSearchFormFilter]
def test_category(self):
qs = self._filter(data={'cat': 'games'})
ok_({'terms': {'category': ['games']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tag(self):
qs = self._filter(data={'tag': 'tarako'})
ok_({'term': {'tags': 'tarako'}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tarako_categories(self):
qs = self._filter(data={'cat': 'tarako-lifestyle'})
ok_({'terms':
{'category': TARAKO_CATEGORIES_MAPPING['tarako-lifestyle']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'cat': 'tarako-games'})
ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-games']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'cat': 'tarako-tools'})
ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-tools']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_app_type(self):
qs = self._filter(data={'app_type': ['hosted']})
ok_({'terms': {'app_type': [1]}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_app_type_packaged(self):
"""Test packaged also includes privileged."""
qs = self._filter(data={'app_type': ['packaged']})
ok_({'terms': {'app_type': [2, 3]}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_manifest_url(self):
url = 'http://hy.fr/manifest.webapp'
qs = self._filter(data={'manifest_url': url})
ok_({'term': {'manifest_url': url}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_offline(self):
"""Ensure we are filtering by offline-capable apps."""
qs = self._filter(data={'offline': 'True'})
ok_({'term': {'is_offline': True}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_online(self):
"""Ensure we are filtering by apps that require online access."""
qs = self._filter(data={'offline': 'False'})
ok_({'term': {'is_offline': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_offline_and_online(self):
"""Ensure we are not filtering by offline/online by default."""
# Pass any form values other than 'offline' to create the dict.
qs = self._filter(data={'cat': 'games'})
ok_({'term': {'is_offline': True}}
not in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_offline': False}}
not in qs['query']['filtered']['filter']['bool']['must'])
def test_languages(self):
qs = self._filter(data={'languages': 'fr'})
ok_({'terms': {'supported_locales': ['fr']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'languages': 'ar,en-US'})
ok_({'terms': {'supported_locales': ['ar', 'en-US']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_author(self):
qs = self._filter(data={'author': 'Mozilla LABS'})
ok_({'term': {'author.raw': u'mozilla labs'}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_installs_allowed_from(self):
qs = self._filter(data={'installs_allowed_from': '*'})
ok_({'term': {'installs_allowed_from': u'*'}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test that we don't filter by this field if not provided.
qs = self._filter()
ok_('installs_allowed_from' not in json.dumps(qs),
"Unexpected 'installs_allowed_from' in query")
def test_premium_types(self):
def ptype(p):
return mkt.ADDON_PREMIUM_API_LOOKUP.get(p)
# Test a single premium type.
qs = self._filter(data={'premium_types': ['free']})
ok_({'terms': {'premium_type': [ptype('free')]}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test many premium types.
qs = self._filter(data={'premium_types': ['free', 'free-inapp']})
ok_({'terms': {'premium_type': [ptype('free'), ptype('free-inapp')]}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test a non-existent premium type.
with self.assertRaises(ParseError):
self._filter(data={'premium_types': ['free', 'platinum']})
def test_device(self):
qs = self._filter(data={'dev': 'desktop'})
ok_({'term': {'device': DEVICE_CHOICES_IDS['desktop']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_no_device_with_device_type(self):
"""Test that providing a device type w/o device doesn't filter."""
qs = self._filter(data={'dev': '', 'device': 'firefoxos'})
ok_('filtered' not in qs['query'].keys())
class TestPublicAppsFilter(FilterTestsBase):
filter_classes = [PublicAppsFilter]
def test_status(self):
qs = self._filter(self.req)
ok_({'term': {'status': mkt.STATUS_PUBLIC}}
in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_disabled': False}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestValidAppsFilter(FilterTestsBase):
filter_classes = [ValidAppsFilter]
def test_status(self):
qs = self._filter(self.req)
ok_({'terms': {'status': mkt.VALID_STATUSES}}
in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_disabled': False}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestDeviceTypeFilter(FilterTestsBase):
filter_classes = [DeviceTypeFilter]
def test_no_filters(self):
qs = self._filter(self.req)
ok_('filtered' not in qs['query'].keys())
def test_mobile(self):
self.req.MOBILE = True
qs = self._filter(self.req)
ok_({'term': {'uses_flash': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_gaia(self):
self.req.GAIA = True
qs = self._filter(self.req)
ok_({'term': {'uses_flash': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tablet(self):
self.req.TABLET = True
qs = self._filter(self.req)
ok_('filtered' not in qs['query'].keys())
def test_device_in_querystring(self):
qs = self._filter(data={'dev': 'desktop'})
ok_({'term': {'device': 1}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'android', 'device': 'mobile'})
ok_({'term': {'device': 2}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'android', 'device': 'tablet'})
ok_({'term': {'device': 3}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'firefoxos'})
ok_({'term': {'device': 4}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestRegionFilter(FilterTestsBase):
filter_classes = [RegionFilter]
def test_no_region_default(self):
qs = self._filter(self.req)
ok_({'term': {'region_exclusions': mkt.regions.RESTOFWORLD.id}}
in qs['query']['filtered']['filter']['bool']['must_not'])
def test_region(self):
self.req.REGION = mkt.regions.BRA
qs = self._filter(self.req)
ok_({'term': {'region_exclusions': mkt.regions.BRA.id}}
in qs['query']['filtered']['filter']['bool']['must_not'])
class TestProfileFilter(FilterTestsBase):
filter_classes = [ProfileFilter]
def profile_qs(self, disabled_features=None):
if disabled_features is None:
disabled_features = {}
profile = FeatureProfile().fromkeys(FeatureProfile(), True)
for feature in disabled_features:
profile[feature] = False
return {'pro': profile.to_signature(), 'dev': 'firefoxos'}
def test_filter_all_features_present(self):
qs = self._filter(data=self.profile_qs())
ok_('filtered' not in qs['query'].keys())
def test_filter_one_feature_present(self):
qs = self._filter(data=self.profile_qs(disabled_features=['sms']))
ok_({'term': {'features.has_sms': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
def test_filter_one_feature_present_desktop(self):
data = self.profile_qs(disabled_features=['sms'])
data['dev'] = 'desktop'
qs = self._filter(data=data)
ok_('filtered' not in qs['query'].keys())
def test_filter_multiple_features_present(self):
qs = self._filter(
data=self.profile_qs(disabled_features=['sms', 'apps']))
ok_({'term': {'features.has_sms': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
ok_({'term': {'features.has_apps': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
class TestSortingFilter(FilterTestsBase):
filter_classes = [SortingFilter]
def test_sort(self):
for api_sort, es_sort in SortingFilter.DEFAULT_SORTING.items():
qs = self._filter(data={'sort': [api_sort]})
if es_sort.startswith('-'):
ok_({es_sort[1:]: {'order': 'desc'}} in qs['sort'], qs)
else:
eq_([es_sort], qs['sort'], qs)
def test_sort_multiple(self):
qs = self._filter(data={'sort': ['rating', 'created']})
ok_({'bayesian_rating': {'order': 'desc'}} in qs['sort'])
ok_({'created': {'order': 'desc'}} in qs['sort'])
def test_sort_regional(self):
"""Popularity and trending use regional sorting for mature regions."""
req = RequestFactory().get('/')
req.REGION = mkt.regions.BRA
# Default empty query searches use popularity.
qs = self._filter(req)
ok_({'popularity_%s'
% mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
# Popularity.
req = RequestFactory().get('/', data={'sort': ['popularity']})
req.REGION = mkt.regions.BRA
qs = self._filter(req)
ok_({'popularity_%s'
% mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
# Trending.
req = RequestFactory().get('/', data={'sort': ['trending']})
req.REGION = mkt.regions.BRA
qs = self._filter(req)
ok_({'trending_%s' % mkt.regions.BRA.id: {'order': 'desc'}}
in qs['sort'])
class TestCombinedFilter(FilterTestsBase):
"""
Basic test to ensure that when filters are combined they result in the
expected query structure.
"""
filter_classes = [SearchQueryFilter, PublicSearchFormFilter,
PublicAppsFilter, SortingFilter]
def test_combined(self):
qs = self._filter(data={'q': 'test', 'cat': 'games',
'sort': 'trending'})
ok_(qs['query']['filtered']['query']['function_score'])
ok_(qs['query']['filtered']['filter'])
must = qs['query']['filtered']['filter']['bool']['must']
ok_({'terms': {'category': ['games']}} in must)
ok_({'term': {'status': 4}} in must)
ok_({'term': {'is_disabled': False}} in must)
ok_({'trending': {'order': 'desc'}} in qs['sort'])
query = qs['query']['filtered']['query']
ok_({'field_value_factor': {'field': 'boost'}}
in query['function_score']['functions'])
ok_({'match': {'name_l10n_english': {'boost': 2.5, 'query': u'test'}}}
in query['function_score']['query']['bool']['should'])
| bsd-3-clause | 1,205,160,211,720,074,000 | 38.705882 | 79 | 0.543833 | false |
rht/zulip | zerver/lib/redis_utils.py | 1 | 2543 | from django.conf import settings
from typing import Any, Dict, Optional
from zerver.lib.utils import generate_random_token
import re
import redis
import ujson
# Redis accepts keys up to 512MB in size, but there's no reason for us to use such size,
# so we want to stay limited to 1024 characters.
MAX_KEY_LENGTH = 1024
class ZulipRedisError(Exception):
pass
class ZulipRedisKeyTooLongError(ZulipRedisError):
pass
class ZulipRedisKeyOfWrongFormatError(ZulipRedisError):
pass
def get_redis_client() -> redis.StrictRedis:
return redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
password=settings.REDIS_PASSWORD, db=0)
def put_dict_in_redis(redis_client: redis.StrictRedis, key_format: str,
data_to_store: Dict[str, Any],
expiration_seconds: int,
token_length: int=64) -> str:
key_length = len(key_format) - len('{token}') + token_length
if key_length > MAX_KEY_LENGTH:
error_msg = "Requested key too long in put_dict_in_redis. Key format: %s, token length: %s"
raise ZulipRedisKeyTooLongError(error_msg % (key_format, token_length))
token = generate_random_token(token_length)
key = key_format.format(token=token)
with redis_client.pipeline() as pipeline:
pipeline.set(key, ujson.dumps(data_to_store))
pipeline.expire(key, expiration_seconds)
pipeline.execute()
return key
def get_dict_from_redis(redis_client: redis.StrictRedis, key_format: str, key: str
) -> Optional[Dict[str, Any]]:
# This function requires inputting the intended key_format to validate
# that the key fits it, as an additionally security measure. This protects
# against bugs where a caller requests a key based on user input and doesn't
# validate it - which could potentially allow users to poke around arbitrary redis keys.
if len(key) > MAX_KEY_LENGTH:
error_msg = "Requested key too long in get_dict_from_redis: %s"
raise ZulipRedisKeyTooLongError(error_msg % (key,))
validate_key_fits_format(key, key_format)
data = redis_client.get(key)
if data is None:
return None
return ujson.loads(data)
def validate_key_fits_format(key: str, key_format: str) -> None:
assert "{token}" in key_format
regex = key_format.format(token=r"[a-z0-9]+")
if not re.fullmatch(regex, key):
raise ZulipRedisKeyOfWrongFormatError("%s does not match format %s" % (key, key_format))
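# Usage sketch (the key format and payload below are illustrative, not part of
# this module's API):
#
#     redis_client = get_redis_client()
#     key = put_dict_in_redis(redis_client, "example_{token}",
#                             {"user_id": 42}, expiration_seconds=3600)
#     data = get_dict_from_redis(redis_client, "example_{token}", key)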
| apache-2.0 | -6,855,087,069,317,244,000 | 38.734375 | 99 | 0.681085 | false |
kratzer/bsm | bsm.py | 1 | 11076 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Kai Kratzer, Stuttgart, Germany; all rights
# reserved unless otherwise stated.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#
# Sound playing machine using pygame
# Further information in the "README" and "COPYING" files.
#
# Dependencies: apt-get install python-pygame
#
# directory listing
import glob
# system
import os
import sys
# parsing
import re
# random numbers
import random
# pygame (main window, sounds, events, timer)
import pygame
# calculations
import math
# pygame local variables
from pygame.locals import *
# Screen settings
width=1366
height=768
fullscreen = False
# Soundfile extensions
# not all is possible, look at the pygame documentation
sndfile_extensions = ['wav']
# Keybindings for the sounds (e.g. if no mouse/touch is available)
keybindings = { \
K_a: 'alkohol', \
K_b: 'bang', \
K_e: 'bier', \
K_q: 'dead', \
K_d: 'dynamit', \
K_f: 'fehlschuss', \
K_h: 'freude', \
K_g: 'gatling', \
K_s: 'general_store', \
K_i: 'indianer', \
K_n: 'kein_bang', \
K_k: 'kinnhaken', \
K_x: 'knapp', \
K_p: 'postkutsche', \
    # NOTE: K_a is already bound to 'alkohol' above; in a dict literal the
    # later entry wins, so this 'angry' binding silently overrides it.
    K_a: 'angry', \
K_w: 'shot_sheriff', \
K_r: 'talk', \
K_t: 'treffer', \
K_v: 'verwirrung', \
}
# timelimit for player's move. This is invoked, if "timelimit" button is pressed
# speech announces 30, 20, 10, 5, 4, 3, 2, 1 seconds till end
player_timelimit = 30
# walk through subdirectories, collect sounds
def read_dir():
bangdict = {}
print "Reading directories..."
for dname, dnames, fnames in os.walk('.'):
dname = re.sub('.*/','',dname)
if dname != '.' and dname != 'ambiente' and dname != 'speech':
soundfiles = []
for ext in sndfile_extensions:
soundfiles += glob.glob(dname + '/' + '*.' + ext)
if len(soundfiles) > 0:
bangdict[dname] = soundfiles
print "done."
return bangdict
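# Example (illustrative layout): with bang/shot1.wav and bier/prost.wav next
# to this script, read_dir() returns
#     {'bang': ['bang/shot1.wav'], 'bier': ['bier/prost.wav']}
# while the 'ambiente' and 'speech' folders are deliberately skipped.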
# Choose random sound from folder
def random_sound(tkey):
rndn = random.randint(0,len(bangsounds[tkey])-1)
return bangsounds[tkey][rndn]
# Queue sound to player
def queue_sound(tsnd):
print "Playing", tsnd
sound = pygame.mixer.Sound(tsnd)
sound.play()
# transform 2D index to linear
def get_linear_index(x,y):
return x + y*nfieldx
# get y coordinate of linear index
def get_index_y(li):
return li / nfieldx
# get x coordinate of linear index
def get_index_x(li):
return li % nfieldx
# get field coordinates by mouse cursor position
def get_field(xm, ym):
for xn in range(len(xborders)-1):
if xm > xborders[xn] and xm <= xborders[xn+1]:
break
for yn in range(len(yborders)-1):
if ym >= yborders[yn] and ym <= yborders[yn+1]:
break
return xn, yn
# get button name by mouse coordinates
def get_button(xm, ym):
xn, yn = get_field(xm, ym)
return bangbuttons[get_linear_index(xn,yn)]
# draw a small (white) exit corner in the bottom right field
def draw_exitcorner():
pygame.draw.rect(window, cwhite, (width-exitcorner_size,height-exitcorner_size,width,height))
def buttoncaption(buttonname):
return re.sub('_',' ',buttonname.capitalize())
# INIT SOUNDS
# dictionary of sounds and buttonnames
bangsounds = read_dir()
# alphabetically sorted buttons in array
bangbuttons = sorted(bangsounds, key=lambda key: bangsounds[key])
# add custom buttons, e.g. for timelimit, stoptimelimit and stopsound
bangbuttons += ['timelimit', 'stoptime', 'stopsound','nextplayer']
nbuttons = len(bangbuttons)
# GAME WINDOW
pygame.init()
pygame.mixer.init()
pygame.font.init()
# fps clock
fpsClock = pygame.time.Clock()
# linewidth and 0.5*linewidth
lw = 4
lwh = int(round(0.5*lw))
# create window handler
if fullscreen:
window = pygame.display.set_mode((width, height), pygame.FULLSCREEN)
else:
window = pygame.display.set_mode((width, height), DOUBLEBUF | HWSURFACE)
pygame.display.set_caption('Bang!soundmachine')
# set colors
cwhite = pygame.Color(255,255,255)
cblack = pygame.Color(0,0,0)
cred = pygame.Color(255,0,0)
cblue = pygame.Color(0,190,255)
cgreen = pygame.Color(0,255,150)
cyellow = pygame.Color(255,255,0)
# set color for buttons
colorbuttons = {\
'bang': cred, 'gatling': cred, 'kinnhaken': cred, \
'fehlschuss': cgreen, 'treffer': cgreen, \
'postkutsche': cyellow, 'general_store': cyellow, \
'kein_bang': cblue\
}
# size for the exit corner
exitcorner_size = 30
# initial window drawings
window.fill(cblack)
pygame.draw.line(window, cwhite, (0,0),(0,height),lw)
pygame.draw.line(window, cwhite, (0,0),(width,0),lw)
pygame.draw.line(window, cwhite, (0,height-lw+1),(width,height-lw+1),lw)
pygame.draw.line(window, cwhite, (width-lw+1,0),(width-lw+1,height),lw)
draw_exitcorner()
awidth = width - 2*lw
aheight = height - 2*lw
surface = (awidth) * (aheight)
ratio = float(awidth) / float(aheight)
fieldsurface = float(surface) / float(nbuttons)
# get field size with a certain edge ratio
fieldy = math.sqrt(fieldsurface / ratio)
fieldx = fieldy * ratio
fieldy = fieldy
testsurface = fieldx * fieldy
# higher number of fields in every direction
nfieldx = int(round(0.5+float(awidth)/fieldx))
nfieldy = int(round(0.5+float(aheight)/fieldy))
# try to avoid empty columns or rows
if (nfieldx - 1) * nfieldy >= nbuttons:
nfieldx -= 1
if (nfieldy - 1) * nfieldx >= nbuttons:
nfieldy -= 1
xborders = [0]
yborders = [0]
# draw borders of fields
if nfieldx > 0:
dx = int(awidth / nfieldx)
xoff = dx
for i in range(nfieldx-1):
xborders.append(xoff)
pygame.draw.line(window, cwhite, (xoff-lwh,0),(xoff-lwh,height),lw)
xoff += dx
if nfieldy > 0:
dy = int(aheight / nfieldy)
yoff = dy
for i in range(nfieldy-1):
yborders.append(yoff)
pygame.draw.line(window, cwhite, (0,yoff-lwh),(width,yoff-lwh),lw)
yoff += dy
xborders.append(width)
yborders.append(height)
# get maximum font size by testing if every button string fits into the fields
fontsize = 100
in_progress = True
print "Determining maximum possible font size..."
while in_progress:
tfont = pygame.font.SysFont("Arial", fontsize)
xtmp, ytmp = tfont.size(buttoncaption(bangbuttons[-1]))
xvals = [xtmp]
yvals = [ytmp]
for i in range(nbuttons-1):
xtmp, ytmp = tfont.size(buttoncaption(bangbuttons[i]))
xvals.append(xtmp)
yvals.append(ytmp)
if max(xvals) >= dx or max(yvals) >= dy:
fontsize -= 1
else:
in_progress = False
print "Done."
# Set buttons
for i in range(nbuttons):
buttonname = bangbuttons[i]
if buttonname in colorbuttons:
tcolor = colorbuttons[buttonname]
else:
tcolor = cwhite
ttext = tfont.render(buttoncaption(buttonname), True, tcolor)
trect = ttext.get_rect()
rx, ry = trect.bottomright
# midpoint rectangle
mrx = 0.5 * rx
mry = 0.5 * ry
ix = get_index_x(i)
iy = get_index_y(i)
xta = xborders[ix]
xtb = xborders[ix+1]
yta = yborders[iy]
ytb = yborders[iy+1]
# midpoint field
mx = 0.5 * (xta + xtb)
my = 0.5 * (yta + ytb)
# move button text start corner to the correct coordinates
window.blit(ttext,(int(mx-mrx),int(my-mry)))
# display the drawings
pygame.display.update()
# Startup sound
queue_sound('speech/hellouser.wav')
# frames per second
fps = 10
# frame counter
counter = 0
# second counter
seconds = 0
# timelimit starting value for user move
timelimit = False
#last_ifx = 0
#last_ify = 0
# MAIN LOOP
while True:
# loop over events
for event in pygame.event.get():
# check for quit request
if event.type == QUIT:
pygame.quit()
sys.exit()
# key pressed
elif event.type == KEYDOWN:
# check if in keybindings
if event.key in keybindings:
tbutton = keybindings[event.key]
psnd = random_sound(tbutton)
queue_sound(psnd)
# fade out sounds if escape is pressed
elif event.key == K_ESCAPE:
pygame.mixer.fadeout(2000)
# track mouse motion (fields could e.g. be highlighted)
elif event.type == MOUSEMOTION:
xm, ym = event.pos
#ifx, ify = get_field(xm, ym)
#if ifx != last_ifx or ify != last_ify:
# last_ifx = ifx
# last_ify = ify
# print ifx, ify
# Mouse button is pressed
elif event.type == MOUSEBUTTONDOWN:
xm, ym = event.pos
# hit exit corner, quit!
if xm > width - exitcorner_size and ym > height - exitcorner_size:
pygame.event.post(pygame.event.Event(QUIT))
else:
# try to play sound, otherwise fade out (e.g. if button without function is pressed)
try:
cbutton = get_button(xm, ym)
if cbutton == 'stopsound':
pygame.mixer.fadeout(1000)
# start player timer
elif cbutton == 'timelimit':
seconds = 0
timelimit = True
elif cbutton == 'stoptime':
timelimit = False
elif cbutton == 'nextplayer':
queue_sound('speech/end_of_line.wav')
else:
queue_sound(random_sound(cbutton))
except Exception as e:
pygame.mixer.fadeout(2000)
pygame.display.update()
# increment fps counter
counter += 1
# if we have reached the number of fps, 1s has passed.
if counter >= fps:
# check for player timelimit
if timelimit:
# remaining seconds
seconds_left = player_timelimit - seconds
# play sounds
if seconds_left > 0 and seconds_left <= 5:
queue_sound('speech/' + str(seconds_left) + '_seconds.wav')
elif seconds_left == 30:
queue_sound('speech/30_seconds.wav')
elif seconds_left == 20:
queue_sound('speech/20_seconds.wav')
elif seconds_left == 10:
queue_sound('speech/10_seconds.wav')
elif seconds_left == 0:
timelimit = False
queue_sound('speech/ba_endline.wav')
counter = 0
seconds += 1
# let the clock tick
fpsClock.tick(fps)
| gpl-3.0 | -6,629,903,377,394,719,000 | 27.183206 | 100 | 0.624503 | false |
ds-hwang/chromium-crosswalk | mojo/public/tools/manifest/manifest_collator.py | 2 | 1537 | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Mojo Application Manifests """
import argparse
import json
import sys
import urlparse
def ParseJSONFile(filename):
with open(filename) as json_file:
try:
return json.load(json_file)
except ValueError:
print "%s is not a valid JSON document" % filename
return None
def main():
parser = argparse.ArgumentParser(
description="Collate Mojo application manifests.")
parser.add_argument("--parent")
parser.add_argument("--output")
parser.add_argument("--application-name")
args, children = parser.parse_known_args()
parent = ParseJSONFile(args.parent)
if parent == None:
return 1
parsed = urlparse.urlparse(parent['url'])
if args.application_name != parsed.hostname:
raise ValueError("Application name '%s' specified in build file does not " \
"match application name '%s' specified in manifest." %
(args.application_name, parsed.hostname))
applications = []
for child in children:
application = ParseJSONFile(child)
if application == None:
return 1
applications.append(application)
if len(applications) > 0:
parent['applications'] = applications
with open(args.output, 'w') as output_file:
json.dump(parent, output_file)
return 0
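# Illustrative invocation (file names are hypothetical; flags follow the argparse
# definitions above):
#
#   manifest_collator.py --parent=app_manifest.json --output=collated.json \
#       --application-name=app child1_manifest.json child2_manifest.json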
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | 8,324,561,153,217,035,000 | 26.446429 | 80 | 0.681848 | false |
rahul003/mxnet | tests/python/unittest/test_sparse_ndarray.py | 1 | 38575 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle as pkl
from mxnet.ndarray import NDArray
from mxnet.test_utils import *
from common import setup_module, with_seed, random_seed, teardown
from mxnet.base import mx_real_t
from numpy.testing import assert_allclose
import numpy.random as rnd
import numpy as np
from common import assertRaises
from mxnet.ndarray.sparse import RowSparseNDArray, CSRNDArray
def sparse_nd_ones(shape, stype):
return mx.nd.ones(shape).tostype(stype)
@with_seed()
def test_sparse_nd_elemwise_add():
def check_sparse_nd_elemwise_binary(shapes, stypes, f, g):
# generate inputs
nds = []
for i, stype in enumerate(stypes):
if stype == 'row_sparse':
nd, _ = rand_sparse_ndarray(shapes[i], stype)
elif stype == 'default':
nd = mx.nd.array(random_arrays(shapes[i]), dtype = np.float32)
else:
assert(False)
nds.append(nd)
# check result
test = f(nds[0], nds[1])
assert_almost_equal(test.asnumpy(), g(nds[0].asnumpy(), nds[1].asnumpy()))
num_repeats = 3
g = lambda x,y: x + y
op = mx.nd.elemwise_add
for i in range(num_repeats):
shape = [rand_shape_2d()] * 2
check_sparse_nd_elemwise_binary(shape, ['default'] * 2, op, g)
check_sparse_nd_elemwise_binary(shape, ['row_sparse', 'row_sparse'], op, g)
@with_seed()
def test_sparse_nd_copy():
def check_sparse_nd_copy(from_stype, to_stype, shape):
from_nd = rand_ndarray(shape, from_stype)
# copy to ctx
to_ctx = from_nd.copyto(default_context())
# copy to stype
to_nd = rand_ndarray(shape, to_stype)
to_nd = from_nd.copyto(to_nd)
assert np.sum(np.abs(from_nd.asnumpy() != to_ctx.asnumpy())) == 0.0
assert np.sum(np.abs(from_nd.asnumpy() != to_nd.asnumpy())) == 0.0
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
stypes = ['row_sparse', 'csr']
for stype in stypes:
check_sparse_nd_copy(stype, 'default', shape)
check_sparse_nd_copy('default', stype, shape)
check_sparse_nd_copy('row_sparse', 'row_sparse', shape_3d)
check_sparse_nd_copy('row_sparse', 'default', shape_3d)
check_sparse_nd_copy('default', 'row_sparse', shape_3d)
@with_seed()
def test_sparse_nd_basic():
def check_sparse_nd_basic_rsp():
storage_type = 'row_sparse'
shape = rand_shape_2d()
nd, (v, idx) = rand_sparse_ndarray(shape, storage_type)
assert(nd._num_aux == 1)
assert(nd.indices.dtype == np.int64)
assert(nd.stype == 'row_sparse')
check_sparse_nd_basic_rsp()
@with_seed()
def test_sparse_nd_setitem():
def check_sparse_nd_setitem(stype, shape, dst):
x = mx.nd.zeros(shape=shape, stype=stype)
x[:] = dst
dst_nd = mx.nd.array(dst) if isinstance(dst, (np.ndarray, np.generic)) else dst
        dst_np = dst_nd.asnumpy() if isinstance(dst_nd, NDArray) else dst_nd
        assert np.all(x.asnumpy() == dst_np)
shape = rand_shape_2d()
for stype in ['row_sparse', 'csr']:
# ndarray assignment
check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default'))
check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, stype))
# numpy assignment
check_sparse_nd_setitem(stype, shape, np.ones(shape))
# scalar assigned to row_sparse NDArray
check_sparse_nd_setitem('row_sparse', shape, 2)
@with_seed()
def test_sparse_nd_slice():
shape = (rnd.randint(2, 10), rnd.randint(2, 10))
stype = 'csr'
A, _ = rand_sparse_ndarray(shape, stype)
A2 = A.asnumpy()
start = rnd.randint(0, shape[0] - 1)
end = rnd.randint(start + 1, shape[0])
assert same(A[start:end].asnumpy(), A2[start:end])
assert same(A[start - shape[0]:end].asnumpy(), A2[start:end])
assert same(A[start:].asnumpy(), A2[start:])
assert same(A[:end].asnumpy(), A2[:end])
ind = rnd.randint(-shape[0], shape[0] - 1)
assert same(A[ind].asnumpy(), A2[ind][np.newaxis, :])
start_col = rnd.randint(0, shape[1] - 1)
end_col = rnd.randint(start_col + 1, shape[1])
result = mx.nd.slice(A, begin=(start, start_col), end=(end, end_col))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start, start_col), end=(end, end_col))
assert same(result_dense.asnumpy(), result.asnumpy())
A = mx.nd.sparse.zeros('csr', shape)
A2 = A.asnumpy()
assert same(A[start:end].asnumpy(), A2[start:end])
result = mx.nd.slice(A, begin=(start, start_col), end=(end, end_col))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start, start_col), end=(end, end_col))
assert same(result_dense.asnumpy(), result.asnumpy())
def check_slice_nd_csr_fallback(shape):
stype = 'csr'
A, _ = rand_sparse_ndarray(shape, stype)
A2 = A.asnumpy()
start = rnd.randint(0, shape[0] - 1)
end = rnd.randint(start + 1, shape[0])
# non-trivial step should fallback to dense slice op
result = mx.nd.sparse.slice(A, begin=(start,), end=(end + 1,), step=(2,))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start,), end=(end + 1,), step=(2,))
assert same(result_dense.asnumpy(), result.asnumpy())
shape = (rnd.randint(2, 10), rnd.randint(1, 10))
check_slice_nd_csr_fallback(shape)
@with_seed()
def test_sparse_nd_concat():
def check_concat(arrays):
ret = np.concatenate([arr.asnumpy() for arr in arrays], axis=0)
        assert same(mx.nd.concat(*arrays, dim=0).asnumpy(), ret)
nds = []
zero_nds = []
ncols = rnd.randint(2, 10)
for i in range(3):
shape = (rnd.randint(2, 10), ncols)
A, _ = rand_sparse_ndarray(shape, 'csr')
nds.append(A)
zero_nds.append(mx.nd.zeros(shape).tostype('csr'))
check_concat(nds)
check_concat(zero_nds)
@with_seed()
def test_sparse_nd_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x == y
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 == x
assert (z.asnumpy() == np.ones(shape)).all()
@with_seed()
def test_sparse_nd_not_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x != y
assert (z.asnumpy() == np.ones(shape)).all()
z = 0 != x
assert (z.asnumpy() == np.zeros(shape)).all()
@with_seed()
def test_sparse_nd_greater():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x > y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y > 0
assert (z.asnumpy() == np.ones(shape)).all()
z = 0 > y
assert (z.asnumpy() == np.zeros(shape)).all()
@with_seed()
def test_sparse_nd_greater_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x >= y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y >= 0
assert (z.asnumpy() == np.ones(shape)).all()
z = 0 >= y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y >= 1
assert (z.asnumpy() == np.ones(shape)).all()
@with_seed()
def test_sparse_nd_lesser():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = y < x
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 < y
assert (z.asnumpy() == np.ones(shape)).all()
z = y < 0
assert (z.asnumpy() == np.zeros(shape)).all()
@with_seed()
def test_sparse_nd_lesser_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = y <= x
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 <= y
assert (z.asnumpy() == np.ones(shape)).all()
z = y <= 0
assert (z.asnumpy() == np.zeros(shape)).all()
z = 1 <= y
assert (z.asnumpy() == np.ones(shape)).all()
@with_seed()
def test_sparse_nd_binary():
N = 3
def check_binary(fn, stype):
for _ in range(N):
ndim = 2
oshape = np.random.randint(1, 6, size=(ndim,))
bdim = 2
lshape = list(oshape)
# one for broadcast op, another for elemwise op
rshape = list(oshape[ndim-bdim:])
for i in range(bdim):
sep = np.random.uniform(0, 1)
if sep < 0.33:
lshape[ndim-i-1] = 1
elif sep < 0.66:
rshape[bdim-i-1] = 1
lhs = np.random.uniform(0, 1, size=lshape)
rhs = np.random.uniform(0, 1, size=rshape)
lhs_nd = mx.nd.array(lhs).tostype(stype)
rhs_nd = mx.nd.array(rhs).tostype(stype)
assert_allclose(fn(lhs, rhs), fn(lhs_nd, rhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
assert_allclose(fn(lhs, lhs), fn(lhs_nd, lhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
stypes = ['row_sparse', 'csr']
for stype in stypes:
check_binary(lambda x, y: x + y, stype)
check_binary(lambda x, y: x - y, stype)
check_binary(lambda x, y: x * y, stype)
check_binary(lambda x, y: x / y, stype)
check_binary(lambda x, y: x ** y, stype)
check_binary(lambda x, y: x > y, stype)
check_binary(lambda x, y: x < y, stype)
check_binary(lambda x, y: x >= y, stype)
check_binary(lambda x, y: x <= y, stype)
check_binary(lambda x, y: x == y, stype)
@with_seed()
def test_sparse_nd_binary_scalar_op():
N = 3
def check(fn, stype, out_stype=None):
for _ in range(N):
ndim = 2
shape = np.random.randint(1, 6, size=(ndim,))
npy = np.random.normal(0, 1, size=shape)
nd = mx.nd.array(npy).tostype(stype)
if out_stype is not None:
assert(nd.stype == out_stype)
assert_allclose(fn(npy), fn(nd).asnumpy(), rtol=1e-4, atol=1e-4)
stypes = ['row_sparse', 'csr']
for stype in stypes:
check(lambda x: 1 + x, stype)
check(lambda x: 1 - x, stype)
check(lambda x: 1 * x, stype)
check(lambda x: 1 / x, stype)
check(lambda x: 2 ** x, stype)
check(lambda x: 1 > x, stype)
check(lambda x: 0.5 > x, stype)
check(lambda x: 0.5 < x, stype)
check(lambda x: 0.5 >= x, stype)
check(lambda x: 0.5 <= x, stype)
check(lambda x: 0.5 == x, stype)
check(lambda x: x / 2, stype, out_stype=stype)
check(lambda x: x + 0, stype, out_stype=stype)
check(lambda x: x - 0, stype, out_stype=stype)
@with_seed()
def test_sparse_nd_binary_iop():
N = 3
def check_binary(fn, stype):
for _ in range(N):
ndim = 2
oshape = np.random.randint(1, 6, size=(ndim,))
lshape = list(oshape)
rshape = list(oshape)
lhs = np.random.uniform(0, 1, size=lshape)
rhs = np.random.uniform(0, 1, size=rshape)
lhs_nd = mx.nd.array(lhs).tostype(stype)
rhs_nd = mx.nd.array(rhs).tostype(stype)
assert_allclose(fn(lhs, rhs),
fn(lhs_nd, rhs_nd).asnumpy(),
rtol=1e-4, atol=1e-4)
def inplace_add(x, y):
x += y
return x
def inplace_mul(x, y):
x *= y
return x
stypes = ['csr', 'row_sparse']
fns = [inplace_add, inplace_mul]
for stype in stypes:
for fn in fns:
check_binary(fn, stype)
@with_seed()
def test_sparse_nd_negate():
def check_sparse_nd_negate(shape, stype):
npy = np.random.uniform(-10, 10, rand_shape_2d())
arr = mx.nd.array(npy).tostype(stype)
assert_almost_equal(npy, arr.asnumpy())
assert_almost_equal(-npy, (-arr).asnumpy())
# a final check to make sure the negation (-) is not implemented
# as inplace operation, so the contents of arr does not change after
# we compute (-arr)
assert_almost_equal(npy, arr.asnumpy())
shape = rand_shape_2d()
stypes = ['csr', 'row_sparse']
for stype in stypes:
check_sparse_nd_negate(shape, stype)
@with_seed()
def test_sparse_nd_broadcast():
sample_num = 1000
# TODO(haibin) test with more than 2 dimensions
def test_broadcast_to(stype):
for _ in range(sample_num):
ndim = 2
target_shape = np.random.randint(1, 11, size=ndim)
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray = mx.nd.array(dat).tostype(stype)
ndarray_ret = ndarray.broadcast_to(shape=target_shape)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
def test_broadcast_like(stype):
for _ in range(sample_num):
ndim = 2
target_shape = np.random.randint(1, 11, size=ndim)
target = mx.nd.ones(shape=tuple(target_shape))
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray = mx.nd.array(dat).tostype(stype)
ndarray_ret = ndarray.broadcast_like(target)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
stypes = ['csr', 'row_sparse']
for stype in stypes:
test_broadcast_to(stype)
test_broadcast_like(stype)
@with_seed()
def test_sparse_nd_transpose():
npy = np.random.uniform(-10, 10, rand_shape_2d())
stypes = ['csr', 'row_sparse']
for stype in stypes:
nd = mx.nd.array(npy).tostype(stype)
assert_almost_equal(npy.T, (nd.T).asnumpy())
@with_seed()
def test_sparse_nd_storage_fallback():
def check_output_fallback(shape):
ones = mx.nd.ones(shape)
out = mx.nd.zeros(shape=shape, stype='csr')
mx.nd.broadcast_add(ones, ones * 2, out=out)
assert(np.sum(out.asnumpy() - 3) == 0)
def check_input_fallback(shape):
ones = mx.nd.ones(shape)
out = mx.nd.broadcast_add(ones.tostype('csr'), ones.tostype('row_sparse'))
assert(np.sum(out.asnumpy() - 2) == 0)
def check_fallback_with_temp_resource(shape):
ones = mx.nd.ones(shape)
out = mx.nd.sum(ones)
assert(out.asscalar() == np.prod(shape))
shape = rand_shape_2d()
check_output_fallback(shape)
check_input_fallback(shape)
check_fallback_with_temp_resource(shape)
@with_seed()
def test_sparse_nd_random():
""" test sparse random operator on cpu """
# gpu random operator doesn't use fixed seed
    if default_context().device_type == 'gpu':
return
shape = (100, 100)
fns = [mx.nd.random.uniform, mx.nd.random.normal, mx.nd.random.gamma]
for fn in fns:
rsp_out = mx.nd.zeros(shape=shape, stype='row_sparse')
dns_out = mx.nd.zeros(shape=shape, stype='default')
with random_seed(0):
fn(shape=shape, out=dns_out)
with random_seed(0):
fn(shape=shape, out=rsp_out)
assert_almost_equal(dns_out.asnumpy(), rsp_out.asnumpy())
@with_seed()
def test_sparse_nd_astype():
stypes = ['row_sparse', 'csr']
for stype in stypes:
x = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='float32')
y = x.astype('int32')
assert(y.dtype == np.int32), y.dtype
@with_seed()
def test_sparse_nd_astype_copy():
stypes = ['row_sparse', 'csr']
for stype in stypes:
x = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='int32')
y = x.astype('float32')
assert (y.dtype == np.float32)
# Test that a new ndarray has been allocated
assert (id(x) != id(y))
y = x.astype('float32', copy=False)
assert (y.dtype == np.float32)
# Test that a new ndarray has been allocated
assert (id(x) != id(y))
y = x.astype('int32')
assert (y.dtype == np.int32)
# Test that a new ndarray has been allocated
# even though they have same dtype
assert (id(x) != id(y))
# Test that a new ndarray has not been allocated
y = x.astype('int32', copy=False)
assert (id(x) == id(y))
# Test the string version 'int32'
# has the same behaviour as the np.int32
y = x.astype(np.int32, copy=False)
assert (id(x) == id(y))
@with_seed(0)
def test_sparse_nd_pickle():
repeat = 1
dim0 = 40
dim1 = 40
stypes = ['row_sparse', 'csr']
densities = [0, 0.5]
stype_dict = {'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
for _ in range(repeat):
shape = rand_shape_2d(dim0, dim1)
for stype in stypes:
for density in densities:
a, _ = rand_sparse_ndarray(shape, stype, density)
assert isinstance(a, stype_dict[stype])
data = pkl.dumps(a)
b = pkl.loads(data)
assert isinstance(b, stype_dict[stype])
assert same(a.asnumpy(), b.asnumpy())
@with_seed(0)
def test_sparse_nd_save_load():
repeat = 1
stypes = ['default', 'row_sparse', 'csr']
stype_dict = {'default': NDArray, 'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
num_data = 20
densities = [0, 0.5]
fname = 'tmp_list.bin'
for _ in range(repeat):
data_list1 = []
for i in range(num_data):
stype = stypes[np.random.randint(0, len(stypes))]
shape = rand_shape_2d(dim0=40, dim1=40)
density = densities[np.random.randint(0, len(densities))]
data_list1.append(rand_ndarray(shape, stype, density))
assert isinstance(data_list1[-1], stype_dict[stype])
mx.nd.save(fname, data_list1)
data_list2 = mx.nd.load(fname)
assert len(data_list1) == len(data_list2)
for x, y in zip(data_list1, data_list2):
assert same(x.asnumpy(), y.asnumpy())
data_map1 = {'ndarray xx %s' % i: x for i, x in enumerate(data_list1)}
mx.nd.save(fname, data_map1)
data_map2 = mx.nd.load(fname)
assert len(data_map1) == len(data_map2)
for k, x in data_map1.items():
y = data_map2[k]
assert same(x.asnumpy(), y.asnumpy())
os.remove(fname)
@with_seed()
def test_sparse_nd_unsupported():
nd = mx.nd.zeros((2,2), stype='row_sparse')
fn_slice = lambda x: x._slice(None, None)
fn_at = lambda x: x._at(None)
fn_reshape = lambda x: x.reshape(None)
fns = [fn_slice, fn_at, fn_reshape]
    for fn in fns:
        try:
            fn(nd)
        except Exception:
            pass
        else:
            assert False, '%s should have raised an exception' % fn
@with_seed()
def test_create_csr():
def check_create_csr_from_nd(shape, density, dtype):
matrix = rand_ndarray(shape, 'csr', density)
# create data array with provided dtype and ctx
data = mx.nd.array(matrix.data.asnumpy(), dtype=dtype)
indptr = matrix.indptr
indices = matrix.indices
csr_created = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=shape)
assert csr_created.stype == 'csr'
assert same(csr_created.data.asnumpy(), data.asnumpy())
assert same(csr_created.indptr.asnumpy(), indptr.asnumpy())
assert same(csr_created.indices.asnumpy(), indices.asnumpy())
# verify csr matrix dtype and ctx is consistent from the ones provided
assert csr_created.dtype == dtype, (csr_created, dtype)
assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
assert csr_created.context == Context.default_ctx, (csr_created.context, Context.default_ctx)
csr_copy = mx.nd.array(csr_created)
assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
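    # Reminder of the CSR layout exercised above (illustrative values): the dense matrix
    # [[1, 0, 2], [0, 0, 3]] corresponds to data=[1, 2, 3], indices=[0, 2, 2],
    # indptr=[0, 2, 3].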
def check_create_csr_from_coo(shape, density, dtype):
matrix = rand_ndarray(shape, 'csr', density)
sp_csr = matrix.asscipy()
sp_coo = sp_csr.tocoo()
csr_created = mx.nd.sparse.csr_matrix((sp_coo.data, (sp_coo.row, sp_coo.col)), shape=shape, dtype=dtype)
assert csr_created.stype == 'csr'
assert same(csr_created.data.asnumpy(), sp_csr.data)
assert same(csr_created.indptr.asnumpy(), sp_csr.indptr)
assert same(csr_created.indices.asnumpy(), sp_csr.indices)
csr_copy = mx.nd.array(csr_created)
assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
# verify csr matrix dtype and ctx is consistent
assert csr_created.dtype == dtype, (csr_created.dtype, dtype)
assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
assert csr_created.context == Context.default_ctx, (csr_created.context, Context.default_ctx)
def check_create_csr_from_scipy(shape, density, f):
def assert_csr_almost_equal(nd, sp):
assert_almost_equal(nd.data.asnumpy(), sp.data)
assert_almost_equal(nd.indptr.asnumpy(), sp.indptr)
assert_almost_equal(nd.indices.asnumpy(), sp.indices)
sp_csr = nd.asscipy()
assert_almost_equal(sp_csr.data, sp.data)
assert_almost_equal(sp_csr.indptr, sp.indptr)
assert_almost_equal(sp_csr.indices, sp.indices)
assert(sp.dtype == sp_csr.dtype), (sp.dtype, sp_csr.dtype)
try:
import scipy.sparse as spsp
# random canonical csr
csr_sp = spsp.rand(shape[0], shape[1], density, format="csr")
csr_nd = f(csr_sp)
assert_csr_almost_equal(csr_nd, csr_sp)
# non-canonical csr which contains duplicates and unsorted indices
indptr = np.array([0, 2, 3, 7])
indices = np.array([0, 2, 2, 0, 1, 2, 1])
data = np.array([1, 2, 3, 4, 5, 6, 1])
non_canonical_csr = spsp.csr_matrix((data, indices, indptr), shape=(3, 3), dtype=csr_nd.dtype)
canonical_csr_nd = f(non_canonical_csr, dtype=csr_nd.dtype)
canonical_csr_sp = non_canonical_csr.copy()
canonical_csr_sp.sum_duplicates()
canonical_csr_sp.sort_indices()
assert_csr_almost_equal(canonical_csr_nd, canonical_csr_sp)
except ImportError:
print("Could not import scipy.sparse. Skipping unit tests for scipy csr creation")
dim0 = 20
dim1 = 20
densities = [0, 0.5]
dtype = np.float64
for density in densities:
shape = rand_shape_2d(dim0, dim1)
check_create_csr_from_nd(shape, density, dtype)
check_create_csr_from_coo(shape, density, dtype)
check_create_csr_from_scipy(shape, density, mx.nd.sparse.array)
check_create_csr_from_scipy(shape, density, mx.nd.array)
@with_seed()
def test_create_row_sparse():
dim0 = 50
dim1 = 50
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape)
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
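        # In the row_sparse format checked here, `data` holds only the non-zero rows and
        # `indices` holds their row ids, e.g. a matrix whose rows 1 and 3 are non-zero
        # is stored as data=[row1, row3], indices=[1, 3] (illustrative example).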
# add this test since we added np.int32 and np.int64 to integer_types
if len(shape) == 2:
for np_int_type in (np.int32, np.int64):
shape = list(shape)
shape = [np_int_type(x) for x in shape]
arg1 = tuple(shape)
mx.nd.sparse.row_sparse_array(arg1, tuple(shape))
shape[0] += 1
assert_exception(mx.nd.sparse.row_sparse_array, ValueError, arg1, tuple(shape))
@with_seed()
def test_create_sparse_nd_infer_shape():
def check_create_csr_infer_shape(shape, density, dtype):
try:
matrix = rand_ndarray(shape, 'csr', density=density)
data = matrix.data
indptr = matrix.indptr
indices = matrix.indices
nd = mx.nd.sparse.csr_matrix((data, indices, indptr), dtype=dtype)
num_rows, num_cols = nd.shape
assert(num_rows == len(indptr) - 1)
assert(indices.shape[0] > 0), indices
assert(np.sum((num_cols <= indices).asnumpy()) == 0)
assert(nd.dtype == dtype), (nd.dtype, dtype)
# cannot infer on invalid shape
except ValueError:
pass
def check_create_rsp_infer_shape(shape, density, dtype):
try:
array = rand_ndarray(shape, 'row_sparse', density=density)
data = array.data
indices = array.indices
nd = mx.nd.sparse.row_sparse_array((data, indices), dtype=dtype)
inferred_shape = nd.shape
assert(inferred_shape[1:] == data.shape[1:])
assert(indices.ndim > 0)
assert(nd.dtype == dtype)
if indices.shape[0] > 0:
assert(np.sum((inferred_shape[0] <= indices).asnumpy()) == 0)
# cannot infer on invalid shape
except ValueError:
pass
dtype = np.int32
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
densities = [0, 0.5, 1]
for density in densities:
check_create_csr_infer_shape(shape, density, dtype)
check_create_rsp_infer_shape(shape, density, dtype)
check_create_rsp_infer_shape(shape_3d, density, dtype)
@with_seed()
def test_create_sparse_nd_from_dense():
def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
arr = f(dense_arr, dtype=dtype, ctx=ctx)
assert(same(arr.asnumpy(), np.ones(shape)))
assert(arr.dtype == dtype)
assert(arr.context == ctx)
# verify the default dtype inferred from dense arr
arr2 = f(dense_arr)
assert(arr2.dtype == default_dtype)
assert(arr2.context == Context.default_ctx)
shape = rand_shape_2d()
dtype = np.int32
src_dtype = np.float64
ctx = mx.cpu(1)
dense_arrs = [mx.nd.ones(shape, dtype=src_dtype), np.ones(shape, dtype=src_dtype), \
np.ones(shape, dtype=src_dtype).tolist()]
for f in [mx.nd.sparse.csr_matrix, mx.nd.sparse.row_sparse_array]:
for dense_arr in dense_arrs:
default_dtype = dense_arr.dtype if isinstance(dense_arr, (NDArray, np.ndarray)) \
else np.float32
check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx)
@with_seed()
def test_create_sparse_nd_from_sparse():
def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx):
arr = f(sp_arr, dtype=dtype, ctx=ctx)
assert(same(arr.asnumpy(), np.ones(shape)))
assert(arr.dtype == dtype)
assert(arr.context == ctx)
# verify the default dtype inferred from dense arr
arr2 = f(sp_arr)
assert(arr2.dtype == src_dtype)
assert(arr2.context == Context.default_ctx)
shape = rand_shape_2d()
src_dtype = np.float64
dtype = np.int32
ctx = mx.cpu(1)
ones = mx.nd.ones(shape, dtype=src_dtype)
csr_arrs = [ones.tostype('csr')]
rsp_arrs = [ones.tostype('row_sparse')]
try:
import scipy.sparse as spsp
csr_sp = spsp.csr_matrix(np.ones(shape, dtype=src_dtype))
csr_arrs.append(csr_sp)
except ImportError:
print("Could not import scipy.sparse. Skipping unit tests for scipy csr creation")
f_csr = mx.nd.sparse.csr_matrix
f_rsp = mx.nd.sparse.row_sparse_array
for sp_arr in csr_arrs:
check_create_from_sp(shape, f_csr, sp_arr, dtype, src_dtype, ctx)
for sp_arr in rsp_arrs:
check_create_from_sp(shape, f_rsp, sp_arr, dtype, src_dtype, ctx)
@with_seed()
def test_create_sparse_nd_empty():
def check_empty(shape, stype):
arr = mx.nd.empty(shape, stype=stype)
assert(arr.stype == stype)
assert same(arr.asnumpy(), np.zeros(shape))
def check_csr_empty(shape, dtype, ctx):
arr = mx.nd.sparse.csr_matrix(shape, dtype=dtype, ctx=ctx)
assert(arr.stype == 'csr')
assert(arr.dtype == dtype)
assert(arr.context == ctx)
assert same(arr.asnumpy(), np.zeros(shape))
# check the default value for dtype and ctx
arr = mx.nd.sparse.csr_matrix(shape)
assert(arr.dtype == np.float32)
assert(arr.context == Context.default_ctx)
def check_rsp_empty(shape, dtype, ctx):
arr = mx.nd.sparse.row_sparse_array(shape, dtype=dtype, ctx=ctx)
assert(arr.stype == 'row_sparse')
assert(arr.dtype == dtype)
assert(arr.context == ctx)
assert same(arr.asnumpy(), np.zeros(shape))
# check the default value for dtype and ctx
arr = mx.nd.sparse.row_sparse_array(shape)
assert(arr.dtype == np.float32)
assert(arr.context == Context.default_ctx)
stypes = ['csr', 'row_sparse']
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
dtype = np.int32
ctx = mx.cpu(1)
for stype in stypes:
check_empty(shape, stype)
check_csr_empty(shape, dtype, ctx)
check_rsp_empty(shape, dtype, ctx)
check_rsp_empty(shape_3d, dtype, ctx)
@with_seed()
def test_synthetic_dataset_generator():
def test_powerlaw_generator(csr_arr, final_row=1):
"""Test power law distribution
Total Elements: 32000, Number of zeros: 3200
Every row has 2 * non zero elements of the previous row.
Also since (2047 < 3200 < 4095) this will be true till 10th row"""
indices = csr_arr.indices.asnumpy()
indptr = csr_arr.indptr.asnumpy()
for row in range(1, final_row + 1):
nextrow = row + 1
current_row_nnz = indices[indptr[row] - 1] + 1
next_row_nnz = indices[indptr[nextrow] - 1] + 1
assert next_row_nnz == 2 * current_row_nnz
# Test if density is preserved
csr_arr_cols, _ = rand_sparse_ndarray(shape=(32, 10000), stype="csr",
density=0.01, distribution="powerlaw")
csr_arr_small, _ = rand_sparse_ndarray(shape=(5, 5), stype="csr",
density=0.5, distribution="powerlaw")
csr_arr_big, _ = rand_sparse_ndarray(shape=(32, 1000000), stype="csr",
density=0.4, distribution="powerlaw")
csr_arr_square, _ = rand_sparse_ndarray(shape=(1600, 1600), stype="csr",
density=0.5, distribution="powerlaw")
assert len(csr_arr_cols.data) == 3200
test_powerlaw_generator(csr_arr_cols, final_row=9)
test_powerlaw_generator(csr_arr_small, final_row=1)
test_powerlaw_generator(csr_arr_big, final_row=4)
test_powerlaw_generator(csr_arr_square, final_row=6)
@with_seed()
def test_sparse_nd_fluent():
def check_fluent_regular(stype, func, kwargs, shape=(5, 17), equal_nan=False):
with mx.name.NameManager():
data = mx.nd.random_uniform(shape=shape, ctx=default_context()).tostype(stype)
regular = getattr(mx.ndarray, func)(data, **kwargs)
fluent = getattr(data, func)(**kwargs)
if isinstance(regular, list):
for r, f in zip(regular, fluent):
assert almost_equal(r.asnumpy(), f.asnumpy(), equal_nan=equal_nan)
else:
assert almost_equal(regular.asnumpy(), fluent.asnumpy(), equal_nan=equal_nan)
all_funcs = ['zeros_like', 'square', 'round', 'rint', 'fix', 'floor', 'ceil', 'trunc',
'abs', 'sign', 'sin', 'degrees', 'radians', 'expm1']
for func in all_funcs:
check_fluent_regular('csr', func, {})
check_fluent_regular('row_sparse', func, {})
all_funcs = ['arcsin', 'arctan', 'tan', 'sinh', 'tanh',
'arcsinh', 'arctanh', 'log1p', 'sqrt', 'relu']
for func in all_funcs:
check_fluent_regular('csr', func, {}, equal_nan=True)
check_fluent_regular('row_sparse', func, {}, equal_nan=True)
check_fluent_regular('csr', 'slice', {'begin': (2, 5), 'end': (4, 7)}, shape=(5, 17))
check_fluent_regular('row_sparse', 'clip', {'a_min': -0.25, 'a_max': 0.75})
for func in ['sum', 'mean', 'norm']:
check_fluent_regular('csr', func, {'axis': 0})
@with_seed()
def test_sparse_nd_exception():
""" test invalid sparse operator will throw a exception """
a = mx.nd.ones((2,2))
assertRaises(mx.base.MXNetError, mx.nd.sparse.retain, a, invalid_arg="garbage_value")
assertRaises(ValueError, mx.nd.sparse.csr_matrix, a, shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.csr_matrix, (2,2), shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.row_sparse_array, (2,2), shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.zeros, "invalid_stype", (2,2))
@with_seed()
def test_sparse_nd_check_format():
""" test check_format for sparse ndarray """
shape = rand_shape_2d()
stypes = ["csr", "row_sparse"]
for stype in stypes:
arr, _ = rand_sparse_ndarray(shape, stype)
arr.check_format()
arr = mx.nd.sparse.zeros(stype, shape)
arr.check_format()
# CSR format index pointer array should be less than the number of rows
shape = (3, 4)
data_list = [7, 8, 9]
indices_list = [0, 2, 1]
indptr_list = [0, 5, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
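    # (A well-formed indptr for this 3x4 matrix with 3 stored values would be
    # non-decreasing, start at 0 and end at len(indices), e.g. [0, 1, 2, 3].)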
# CSR format indices should be in ascending order per row
indices_list = [2, 1, 1]
indptr_list = [0, 2, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# CSR format indptr should end with value equal with size of indices
indices_list = [1, 2, 1]
indptr_list = [0, 2, 2, 4]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# CSR format indices should not be negative
indices_list = [0, 2, 1]
indptr_list = [0, -2, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should be less than the number of rows
shape = (3, 2)
data_list = [[1, 2], [3, 4]]
indices_list = [1, 4]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should be in ascending order
indices_list = [1, 0]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should not be negative
indices_list = [1, -2]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
@with_seed()
def test_sparse_nd_norm():
def check_sparse_nd_norm(stype, shape, density, **kwargs):
data, _ = rand_sparse_ndarray(shape, stype, density)
norm = data.norm(**kwargs)
expected_norm = data.tostype('default').norm(**kwargs)
assert_almost_equal(norm.asnumpy(), expected_norm.asnumpy())
shape = (5, 5)
stypes = ['row_sparse', 'csr']
densities = [0, 0.5, 1]
for stype in stypes:
for density in densities:
check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=False, ord=2)
# test fallback
check_sparse_nd_norm(stype, shape, density, axis=0, keepdims=False, ord=2)
check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=True, ord=2)
@with_seed()
def test_sparse_fc():
def check_sparse_fc(batch_size, dim_in, dim_out, stype):
data = rand_ndarray((batch_size, dim_in), stype, density=0.5)
weight = rand_ndarray((dim_out, dim_in), 'row_sparse', density=1)
bias = rand_ndarray((dim_out, 1), 'row_sparse', density=1)
out = mx.nd.sparse.FullyConnected(data, weight, num_hidden=dim_out, bias=bias)
data_dns = data.tostype('default')
weight_dns = weight.tostype('default')
out_dns = mx.nd.FullyConnected(data_dns, weight_dns, num_hidden=dim_out, bias=bias)
assert_almost_equal(out.asnumpy(), out_dns.asnumpy())
# test FC with row_sparse weight w/ density=1, dense data
check_sparse_fc(5, 10, 8, 'default')
# test FC with row_sparse weight w/ density=1, csr data (fallback)
check_sparse_fc(5, 10, 8, 'csr')
if __name__ == '__main__':
import nose
nose.runmodule()
| apache-2.0 | -1,889,587,153,475,739,600 | 37.807847 | 112 | 0.589086 | false |
madgik/exareme | Exareme-Docker/src/madisServer/MadisServer.py | 1 | 2979 | import tornado.web
from tornado import gen
from tornado.log import enable_pretty_logging
from tornado.options import define, options
import logging
import os
PROCESSES_PER_CPU = 2
WEB_SERVER_PORT=8888
define("port", default=WEB_SERVER_PORT, help="run on the given port", type=int)
import MadisInstance
from MadisInstance import QueryExecutionException
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler)
]
tornado.web.Application.__init__(self, handlers)
class BaseHandler(tornado.web.RequestHandler):
def __init__(self, *args):
tornado.web.RequestHandler.__init__(self, *args)
class MainHandler(BaseHandler):
#logging stuff..
enable_pretty_logging()
logger = logging.getLogger('MainHandler')
hdlr = logging.FileHandler('/var/log/MadisServer.log','w+')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
  if os.environ.get('LOG_LEVEL') == "DEBUG":
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
access_log.addHandler(hdlr)
app_log.addHandler(hdlr)
gen_log.addHandler(hdlr)
madisInstance=MadisInstance.MadisInstance(logger)
def execQuery(self,dbFilename,query):
self.logger.debug("(MadisServer::execQuery) will call madisInstance.connectToDb({})".format(dbFilename))
self.madisInstance.connectToDb(dbFilename)
try:
self.logger.debug("(MadisServer::execQuery) will call madisInstance.execute({})".format(query))
result= self.madisInstance.execute(query)
finally:
self.madisInstance.closeConnectionToDb()
return result
@tornado.gen.coroutine
def post(self):
dbFilename=self.get_argument("dbfilename")
query=self.get_argument("query")
self.logger.debug("(MadisServer::post) dbfilename={} query={}".format(dbFilename,query))
try:
str_result=self.execQuery(dbFilename,query)
except QueryExecutionException as e:
#raise tornado.web.HTTPError(status_code=500,log_message="...the log message??")
self.logger.error("(MadisServer::post) QueryExecutionException: {}".format(str(e)))
#print "QueryExecutionException ->{}".format(str(e))
self.set_status(500)
self.write(str(e))
self.finish()
return
self.logger.debug("(MadisServer::post) str_result-> {}".format(str_result))
self.write("{}".format(str_result))
self.finish()
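# Illustrative client call (hypothetical paths; assumes the server runs locally on the
# default port):
#
#   curl -d "dbfilename=/tmp/test.db" -d "query=SELECT 1;" http://localhost:8888/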
def main():
sockets = tornado.netutil.bind_sockets(options.port)
tornado.process.fork_processes(tornado.process.cpu_count() * PROCESSES_PER_CPU)
server = tornado.httpserver.HTTPServer(Application())
server.add_sockets(sockets)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| mit | -3,267,898,687,122,339,000 | 30.357895 | 108 | 0.699564 | false |
dontnod/weblate | weblate/utils/tests/test_middleware.py | 1 | 2758 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from unittest import TestCase
from django.http.request import HttpRequest
from django.test.utils import override_settings
from weblate.middleware import ProxyMiddleware
class ProxyTest(TestCase):
def get_response(self, request):
self.assertEqual(request.META['REMOTE_ADDR'], '1.2.3.4')
return 'response'
@override_settings(
IP_BEHIND_REVERSE_PROXY=False,
IP_PROXY_HEADER='HTTP_X_FORWARDED_FOR',
IP_PROXY_OFFSET=0
)
def test_direct(self):
request = HttpRequest()
request.META['REMOTE_ADDR'] = '1.2.3.4'
middleware = ProxyMiddleware(self.get_response)
self.assertEqual(middleware(request), 'response')
@override_settings(
IP_BEHIND_REVERSE_PROXY=True,
IP_PROXY_HEADER='HTTP_X_FORWARDED_FOR',
IP_PROXY_OFFSET=0
)
def test_proxy(self):
request = HttpRequest()
request.META['REMOTE_ADDR'] = '7.8.9.0'
request.META['HTTP_X_FORWARDED_FOR'] = '1.2.3.4'
middleware = ProxyMiddleware(self.get_response)
self.assertEqual(middleware(request), 'response')
@override_settings(
IP_BEHIND_REVERSE_PROXY=True,
IP_PROXY_HEADER='HTTP_X_FORWARDED_FOR',
IP_PROXY_OFFSET=1
)
def test_proxy_second(self):
request = HttpRequest()
request.META['REMOTE_ADDR'] = '7.8.9.0'
request.META['HTTP_X_FORWARDED_FOR'] = '2.3.4.5, 1.2.3.4'
middleware = ProxyMiddleware(self.get_response)
self.assertEqual(middleware(request), 'response')
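        # With IP_PROXY_OFFSET=1 the middleware is expected to pick the second entry of
        # the X-Forwarded-For chain ('1.2.3.4' here), which get_response() asserts as
        # REMOTE_ADDR.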
@override_settings(
IP_BEHIND_REVERSE_PROXY=True,
IP_PROXY_HEADER='HTTP_X_FORWARDED_FOR',
IP_PROXY_OFFSET=0
)
def test_proxy_invalid(self):
request = HttpRequest()
request.META['REMOTE_ADDR'] = '1.2.3.4'
request.META['HTTP_X_FORWARDED_FOR'] = '2.3.4'
middleware = ProxyMiddleware(self.get_response)
self.assertEqual(middleware(request), 'response')
| gpl-3.0 | 8,510,525,146,067,863,000 | 33.873418 | 72 | 0.66461 | false |
mheap/ansible | lib/ansible/module_utils/basic.py | 1 | 116232 | # Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
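# Note: these are binary multipliers, e.g. 'K' == 1 << 10 == 1024 bytes; presumably
# consumed by human-readable size helpers elsewhere in this module.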
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
PASS_VARS = {
'check_mode': 'check_mode',
'debug': '_debug',
'diff': '_diff',
'keep_remote_files': '_keep_remote_files',
'module_name': '_name',
'no_log': 'no_log',
'remote_tmp': '_remote_tmp',
'selinux_special_fs': '_selinux_special_fs',
'shell_executable': '_shell',
'socket': '_socket_path',
'syslog_facility': '_syslog_facility',
'tmpdir': '_tmpdir',
'verbosity': '_verbosity',
'version': 'ansible_version',
}
PASS_BOOLS = ('no_log', 'debug', 'diff')
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import atexit
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
else:
sj_version = json.__version__.split('.')
if sj_version < ['1', '6']:
# Version 1.5 released 2007-01-18 does not have the encoding parameter which we need
print('\n{"msg": "Error: Ansible requires the stdlib json or simplejson >= 1.6. Neither was found!", "failed": true}')
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.common._collections_compat import (
deque,
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
# These are things we want. About setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
        except Exception:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except Exception:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
    __subclasses__ returns only direct subclasses. This one goes down the whole class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Appending all subclasses to visit and keep a reference of available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
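# Minimal illustration (class names are hypothetical):
#
#   class Hardware(object):
#       pass
#
#   class LinuxHardware(Hardware):
#       pass
#
#   class DebianHardware(LinuxHardware):
#       pass
#
#   get_all_subclasses(Hardware)  # -> [LinuxHardware, DebianHardware]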
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
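# Sketch of the calling pattern load_platform_subclass() supports (class names are
# hypothetical, not taken from a specific module):
#
#   class User(object):
#       platform = 'Generic'
#       distribution = None
#
#       def __new__(cls, *args, **kwargs):
#           return load_platform_subclass(User, *args, **kwargs)
#
#   class FreeBsdUser(User):
#       platform = 'FreeBSD'
#       distribution = None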
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
        Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to text (unicode) strings
        Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
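# Example (illustrative; 'secret' and the keys are placeholders): values that
# appear in no_log_strings are masked, including inside containers, e.g.
#   remove_values({'token': 'secret', 'user': 'admin'}, ['secret'])
#   -> {'token': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', 'user': 'admin'}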
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
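# Example (illustrative; the URL is a placeholder): a URL-style credential is
# masked while the rest of the message is preserved, e.g.
#   heuristic_log_sanitize('fetching https://user:secret@example.com/repo')
#   -> 'fetching https://user:********@example.com/repo'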
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (size / limit, suffix)
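# Example (illustrative):
#   bytes_to_human(1048576)               -> '1.00 MB'
#   bytes_to_human(1048576, isbits=True)  -> '1.00 Mb'
#   bytes_to_human(2048, unit='K')        -> '2.00 KB'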
def human_to_bytes(number, default_unit=None, isbits=False):
'''
    Convert a number in string format into bytes (ex: '2K' => 2048), optionally
    taking the unit from the second argument, ex:
      human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
''' No unit given, returning raw number '''
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
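# Example (illustrative):
#   human_to_bytes('2K')                  -> 2048
#   human_to_bytes('10M')                 -> 10485760
#   human_to_bytes(10, default_unit='M')  -> 10485760
#   human_to_bytes('1Mb', isbits=True)    -> 1048576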
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most times we really want to know "Can the current user execute this
file" This function does not tell us that, only if an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
        # This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
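# Example (illustrative; the parameter and environment variable names are
# placeholders): env_fallback is normally referenced from an argument_spec so a
# parameter can fall back to an environment variable, e.g.
#   argument_spec = dict(
#       api_token=dict(type='str', no_log=True,
#                      fallback=(env_fallback, ['EXAMPLE_API_TOKEN'])),
#   )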
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key, attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
def _json_encode_fallback(obj):
if isinstance(obj, Set):
return list(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Cannot json serialize %s" % to_native(obj))
def jsonify(data, **kwargs):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
        # Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
except UnicodeDecodeError:
continue
raise UnicodeError('Invalid unicode encoding encountered')
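# Example (illustrative; key order in the output depends on the json module and
# Python version): types the json module cannot handle natively are converted by
# _json_encode_fallback, e.g.
#   jsonify({'when': datetime.datetime(2018, 1, 1), 'ports': set([80])})
#   -> '{"when": "2018-01-01T00:00:00", "ports": [80]}'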
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self.aliases = {}
self._legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
        # This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
                           ' Update the code for this module. In the future, AnsibleModule will'
' always check for invalid arguments.', version='2.9')
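    # Typical usage (illustrative sketch; the option names are placeholders),
    # from a module's main() function:
    #   module = AnsibleModule(
    #       argument_spec=dict(
    #           name=dict(type='str', required=True),
    #           state=dict(type='str', choices=['present', 'absent'], default='present'),
    #       ),
    #       supports_check_mode=True,
    #   )
    #   module.exit_json(changed=False, name=module.params['name'])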
@property
def tmpdir(self):
# if _ansible_tmpdir was not set, the module needs to create it and
# clean it up once finished.
if self._tmpdir is None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if not os.path.exists(basedir):
self.warn("Module remote_tmp %s did not exist and was created "
"with a mode of 0700, this may cause issues when "
"running as another user. To avoid this, create the "
"remote_tmp dir with the correct permissions "
"manually" % basedir)
os.makedirs(basedir, mode=0o700)
basefile = "ansible-moduletmp-%s-" % time.time()
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
            # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags', '') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
        This enables symbolic chmod string parsing as stated in the chmod man-page.
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
            # The user(s) the permissions apply to is the first element in the
            # 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
            # Now we have two lists of equal length, one containing the requested
# permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
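    # Example (illustrative; assumes a regular file whose current mode is 0o644):
    #   _symbolic_mode_to_octal(path_stat, 'u+x')  -> 0o744
    #   _symbolic_mode_to_octal(path_stat, 'go=')  -> 0o600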
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
        # Get the umask; if the 'user' part is empty, the effect is as if (a) were
        # given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
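    # Typical usage (illustrative sketch): modules that manage files usually pass
    # the common file arguments straight through, e.g.
    #   file_args = module.load_file_common_arguments(module.params)
    #   changed = module.set_fs_attributes_if_different(file_args, changed)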
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
elif k.startswith('_ansible_'):
# handle setting internal properties from internal ansible vars
key = k.replace('_ansible_', '')
if key in PASS_BOOLS:
setattr(self, PASS_VARS[key], self.boolean(v))
else:
setattr(self, PASS_VARS[key], v)
# clean up internal params:
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ", ".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that parameters which conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
            # If is_one_of is True, at least one requirement should be
            # present, else all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
term = 'any'
else:
term = 'all'
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but %s of the following are missing: %s" % (key, val, term, ', '.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
            # already templated to a data structure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
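    # Example (illustrative): both JSON strings and key=value strings are accepted,
    #   _check_type_dict('{"a": 1}')  -> {'a': 1}
    #   _check_type_dict('a=1 b=2')   -> {'a': '1', 'b': '2'}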
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
opt_dirs = [] if opt_dirs is None else opt_dirs
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
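    # Typical usage (illustrative; the extra search directory is a placeholder):
    #   git_path = module.get_bin_path('git', required=True)
    #   lsattr_path = module.get_bin_path('lsattr', False, opt_dirs=['/opt/local/bin'])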
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# add traceback if debug or high verbosity and it is missing
        # Note: badly named as 'exception'; it has really always been 'traceback'
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ', '.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
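    # Illustrative sketch (assumes 'module' is an instantiated AnsibleModule and
    # '/tmp/example.dat' is a hypothetical path). The named helpers above simply
    # delegate to digest_from_file() with the matching algorithm string:
    #
    #     checksum = module.sha256('/tmp/example.dat')  # same as digest_from_file(path, 'sha256')
    #     if checksum is None:
    #         module.fail_json(msg='file is missing')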
def backup_local(self, fn):
        '''make a date-marked backup of the specified file, return the backup path on success or an empty string if the file does not exist'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
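    # Illustrative sketch with hypothetical values: backing up '/etc/example.conf'
    # from a process with PID 1234 on 2017-01-01 at 12:00:00 would return a path
    # such as '/etc/example.conf.1234.2017-01-01@12:00:00~'.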
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
        '''atomically move src to dest, copying attributes from dest; on failure it calls fail_json.
it uses os.rename to ensure this as it is an atomic operation, rest of the function is
to work around limitations, corner cases and ensure selinux context is saved if possible'''
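        # Illustrative sketch (not part of the original code, paths and parameter
        # names are hypothetical): a typical caller writes its output to a temporary
        # file first and then swaps it into place:
        #
        #     fd, tmp_path = tempfile.mkstemp()
        #     os.write(fd, b'new contents')
        #     os.close(fd)
        #     module.atomic_move(tmp_path, '/etc/example.conf',
        #                        unsafe_writes=module.params.get('unsafe_writes', False))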
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
                # only try workarounds for errno 1 (not permitted), 13 (permission denied), 16 (device or resource busy),
                # 18 (cross device) and 26 (text file busy), which happen on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
                error_msg = ('Failed creating tmp file for atomic move. This usually happens when using a Python3 version older than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
        # sadly there are some situations where we cannot ensure atomicity; we only update
        # the file unsafely if the user insists and we get the appropriate error
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
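    # Illustrative sketch (hypothetical command line): given
    # ['mysql', '--user=admin', '--password=s3cret'], _clean_args() would report
    # "mysql --user=admin --password=********", so secrets never reach the logs.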
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
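        # Illustrative sketch (hypothetical command and paths, not part of the
        # original code):
        #
        #     rc, out, err = module.run_command(['ls', '-l', '/tmp'])
        #     if rc != 0:
        #         module.fail_json(msg='listing failed', stdout=out, stderr=err)
        #
        # or, equivalently, let run_command handle the non-zero exit itself:
        #
        #     rc, out, err = module.run_command('ls -l /tmp', check_rc=True)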
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = " ".join([shlex_quote(x) for x in args])
# not set explicitly, check if set by controller
if executable:
args = [executable, '-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [self._shell, '-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand shellisms
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
| gpl-3.0 | 449,926,905,707,652,200 | 38.615542 | 155 | 0.553841 | false |
silverfield/pythonsessions | s12_chat/chat_client.py | 1 | 3462 | # ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
import sys
import socket
import select
import time
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from s12_chat import chat_settings
# ---------------------------------------------------------------
# Class
# ---------------------------------------------------------------
class ChatClient:
"""Simple implementation of a chat client"""
# ---------------------------------------------------------------
# Initialisation
# ---------------------------------------------------------------
def __init__(self, nick, server_hostname, server_port=chat_settings.SERVER_PORT):
self._server_hostname = server_hostname
self._server_port = server_port
self._nick = nick
# set up client socket
self._client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._client_sock.settimeout(2) # put to timeout mode
try:
self._client_sock.connect((self._server_hostname, self._server_port))
except ConnectionRefusedError:
print("Server probably not running at {}:{}".format(server_hostname, server_port))
exit(0)
self._client_sock.send(self._nick.encode())
print("Chat server on " + str(self._client_sock.getpeername()))
print("You are on " + str(self._client_sock.getsockname()))
# ---------------------------------------------------------------
# Interface
# ---------------------------------------------------------------
def start_chatting(self):
print("Hi " + str(self._nick) + "! You're connected to the chat server. You can start sending messages")
self.__prompt()
socket_list = [sys.stdin, self._client_sock]
while True:
time.sleep(0.01)
# get the list sockets which are readable
r_sockets, _, _ = select.select(socket_list, [], [])
for sock in r_sockets:
if sock == self._client_sock: # incoming message from server
data = sock.recv(chat_settings.BUFFER_SIZE).decode()
if not data:
print("Server shut down. Terminating...")
exit(0)
print()
print(data)
self.__prompt()
else: # user entered a message
msg = sys.stdin.readline()
self._client_sock.send(msg.encode())
self.__prompt()
# ---------------------------------------------------------------
# Implementation
# ---------------------------------------------------------------
def __prompt(self):
sys.stdout.write("[" + self._nick + "] ")
sys.stdout.flush()
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
def main(argv):
if len(argv) < 2:
print("Provide arguments: nick server_hostname [server_port]")
exit(1)
nick = argv[0]
server_hostname = argv[1]
server_port = chat_settings.SERVER_PORT
if len(argv) >= 3:
server_port = int(argv[2])
client = ChatClient(nick, server_hostname, server_port)
client.start_chatting()
if __name__ == '__main__':
main(sys.argv[1:]) | mit | 9,178,664,240,059,496,000 | 33.63 | 112 | 0.431831 | false |
openstack/neutron-lib | neutron_lib/api/validators/availability_zone.py | 1 | 1626 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils
from neutron_lib._i18n import _
from neutron_lib.api import validators
from neutron_lib.db import constants as db_const
from neutron_lib import exceptions
def convert_az_list_to_string(az_list):
"""Convert a list of availability zones into a string.
:param az_list: A list of AZs.
:returns: The az_list in string format.
"""
return jsonutils.dumps(az_list)
def convert_az_string_to_list(az_string):
"""Convert an AZ list in string format into a python list.
:param az_string: The AZ list in string format.
:returns: The python list of AZs build from az_string.
"""
return jsonutils.loads(az_string) if az_string else []
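# Illustrative sketch (hypothetical zone names): the two helpers above are inverses
# of each other, storing the hint list as a JSON string in the database:
#
#     convert_az_list_to_string(['nova', 'zone-b'])   # -> '["nova", "zone-b"]'
#     convert_az_string_to_list('["nova", "zone-b"]') # -> ['nova', 'zone-b']
#     convert_az_string_to_list(None)                 # -> []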
def _validate_availability_zone_hints(data, valid_value=None):
msg = validators.validate_list_of_unique_strings(data)
if msg:
return msg
az_string = convert_az_list_to_string(data)
if len(az_string) > db_const.AZ_HINTS_DB_LEN:
msg = _("Too many availability_zone_hints specified")
raise exceptions.InvalidInput(error_message=msg)
| apache-2.0 | -3,779,166,133,405,238,300 | 33.595745 | 69 | 0.725092 | false |
ngageoint/scale | scale/metrics/models.py | 1 | 49093 | """Defines the database models for various system metrics."""
from __future__ import unicode_literals
import datetime
import logging
import sys
import django.contrib.gis.db.models as models
import django.utils.timezone as timezone
from django.db import transaction
from error.models import Error
from job.models import Job, JobExecutionEnd, JobType
from ingest.models import Ingest, Strike
from metrics.registry import MetricsPlotData, MetricsType, MetricsTypeGroup, MetricsTypeFilter
logger = logging.getLogger(__name__)
class PlotBigIntegerField(models.BigIntegerField):
"""Custom field used to indicate a model attribute can be used as a plot value.
:keyword verbose_name: The display name of the field.
:type verbose_name: string
:keyword name: The internal database name of the field.
:type name: string
:keyword aggregate: The math operation used to compute the value. Examples: avg, max, min, sum
:type aggregate: string
:keyword group: The base field name used to group together related values. For example, a field may have several
aggregate variations that all reference the same base attribute.
:type group: string
:keyword units: The mathematical units applied to the value. Examples: seconds, minutes, hours
:type units: string
"""
def __init__(self, verbose_name=None, name=None, aggregate=None, group=None, units=None, **kwargs):
self.aggregate = aggregate
self.group = group
self.units = units
super(PlotBigIntegerField, self).__init__(verbose_name, name, **kwargs)
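# Illustrative sketch (hypothetical field name): a metrics model declares a plot
# column by passing the extra metadata alongside the normal Django field options,
# mirroring the concrete declarations further below, e.g.
#
#     example_count = PlotBigIntegerField(aggregate='sum', group='overview',
#                                         units='count', verbose_name='Example Count',
#                                         blank=True, null=True)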
class PlotIntegerField(models.IntegerField):
"""Custom field used to indicate a model attribute can be used as a plot value.
:keyword verbose_name: The display name of the field.
:type verbose_name: string
:keyword name: The internal database name of the field.
:type name: string
:keyword aggregate: The math operation used to compute the value. Examples: avg, max, min, sum
:type aggregate: string
:keyword group: The base field name used to group together related values. For example, a field may have several
aggregate variations that all reference the same base attribute.
:type group: string
:keyword units: The mathematical units applied to the value. Examples: seconds, minutes, hours
:type units: string
"""
def __init__(self, verbose_name=None, name=None, aggregate=None, group=None, units=None, **kwargs):
self.aggregate = aggregate
self.group = group
self.units = units
super(PlotIntegerField, self).__init__(verbose_name, name, **kwargs)
PLOT_FIELD_TYPES = [PlotBigIntegerField, PlotIntegerField]
class MetricsErrorManager(models.Manager):
"""Provides additional methods for computing daily error metrics."""
def calculate(self, date):
"""See :meth:`metrics.registry.MetricsTypeProvider.calculate`."""
started = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=timezone.utc)
ended = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=timezone.utc)
# Fetch all the job executions with an error for the requested day
job_exe_ends = JobExecutionEnd.objects.filter(error__is_builtin=True, ended__gte=started, ended__lte=ended)
job_exe_ends = job_exe_ends.select_related('error')
# Calculate the overall counts based on job status
entry_map = {}
for job_exe_end in job_exe_ends.iterator():
occurred_datetime = job_exe_end.ended if job_exe_end.ended else date
entry_date_time = datetime.datetime(occurred_datetime.year, occurred_datetime.month, occurred_datetime.day,
occurred_datetime.hour, tzinfo=timezone.utc)
if job_exe_end.error not in entry_map:
entry_map[job_exe_end.error] = {}
if entry_date_time not in entry_map[job_exe_end.error]:
entry = MetricsError(error=job_exe_end.error, occurred=entry_date_time, created=timezone.now())
entry.total_count = 0
entry_map[job_exe_end.error][entry_date_time] = entry
entry = entry_map[job_exe_end.error][entry_date_time]
entry.total_count += 1
# Save the new metrics to the database
for entry in entry_map:
for entry_time in entry_map[entry]:
self._replace_entries(entry_time, entry, [entry_map[entry][entry_time]])
def get_metrics_type(self, include_choices=False):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_metrics_type`."""
# Create the metrics type definition
metrics_type = MetricsType('errors', 'Errors', 'Metrics for jobs grouped by errors.')
metrics_type.filters = [MetricsTypeFilter('name', 'string'), MetricsTypeFilter('category', 'string')]
metrics_type.groups = MetricsError.GROUPS
metrics_type.set_columns(MetricsError, PLOT_FIELD_TYPES)
# Optionally include all the possible error choices
if include_choices:
metrics_type.choices = Error.objects.filter(is_builtin=True)
return metrics_type
def get_plot_data(self, started=None, ended=None, choice_ids=None, columns=None):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_plot_data`."""
# Fetch all the matching job type metrics based on query filters
entries = MetricsError.objects.all().order_by('occurred')
if started:
entries = entries.filter(occurred__gte=started)
if ended:
entries = entries.filter(occurred__lte=ended)
if choice_ids:
entries = entries.filter(error_id__in=choice_ids)
if not columns:
columns = self.get_metrics_type().columns
column_names = [c.name for c in columns]
entries = entries.values('error_id', 'occurred', *column_names)
# Convert the database models to plot models
return MetricsPlotData.create(entries, 'occurred', 'error_id', choice_ids, columns)
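    # Illustrative sketch (hypothetical dates and error ids): plotting only the
    # total_count column for two error types over January 2017 could look like
    #
    #     columns = [c for c in MetricsError.objects.get_metrics_type().columns
    #                if c.name == 'total_count']
    #     plot_data = MetricsError.objects.get_plot_data(
    #         started=datetime.datetime(2017, 1, 1, tzinfo=timezone.utc),
    #         ended=datetime.datetime(2017, 1, 31, tzinfo=timezone.utc),
    #         choice_ids=[1, 2], columns=columns)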
@transaction.atomic
def _replace_entries(self, date, error, entries):
"""Replaces all the existing metric entries for the given date with new ones.
:param date: The date when job executions associated with the metrics ended.
        :type date: datetime.date
        :param error: The error type associated with the metrics.
        :type error: :class:`error.models.Error`
        :param entries: The new metrics models to save.
:type entries: list[:class:`metrics.models.MetricsError`]
"""
# Delete all the previous metrics entries
MetricsError.objects.filter(occurred=date, error=error).delete()
# Save all the new metrics models
MetricsError.objects.bulk_create(entries)
class MetricsError(models.Model):
"""Tracks all the error metrics grouped by error type.
:keyword error: The error type associated with these metrics.
:type error: :class:`django.db.models.ForeignKey`
:keyword occurred: The date when the errors included in this model were created.
    :type occurred: :class:`django.db.models.DateTimeField`
:keyword total_count: The total number of errors of this type that occurred for the day.
:type total_count: :class:`metrics.models.PlotBigIntegerField`
:keyword created: When the model was first created.
:type created: :class:`django.db.models.DateTimeField`
"""
GROUPS = [
MetricsTypeGroup('overview', 'Overview', 'Overall counts based on error type.'),
]
error = models.ForeignKey('error.Error', on_delete=models.PROTECT)
occurred = models.DateTimeField(db_index=True)
total_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of jobs that failed with a particular error type.', null=True,
units='count', verbose_name='Total Count')
created = models.DateTimeField(auto_now_add=True)
objects = MetricsErrorManager()
class Meta(object):
"""meta information for the db"""
db_table = 'metrics_error'
class MetricsIngestManager(models.Manager):
"""Provides additional methods for computing daily ingest metrics."""
def calculate(self, date):
"""See :meth:`metrics.registry.MetricsTypeProvider.calculate`."""
started = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=timezone.utc)
ended = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=timezone.utc)
# Fetch all the ingests relevant for metrics
ingests = Ingest.objects.filter(status__in=['DEFERRED', 'INGESTED', 'ERRORED', 'DUPLICATE'],
ingest_ended__gte=started, ingest_ended__lte=ended, strike__isnull=False)
ingests = ingests.select_related('strike').defer('strike__configuration')
# Calculate the overall counts based on ingest status
entry_map = {}
for ingest in ingests.iterator():
occurred_datetime = ingest.ingest_ended if ingest.ingest_ended else date
entry_datetime = datetime.datetime(occurred_datetime.year, occurred_datetime.month, occurred_datetime.day,
occurred_datetime.hour, tzinfo=timezone.utc)
if ingest.strike not in entry_map:
entry_map[ingest.strike] = {}
if entry_datetime not in entry_map[ingest.strike]:
entry = MetricsIngest(strike=ingest.strike, occurred=entry_datetime, created=timezone.now())
entry.deferred_count = 0
entry.ingested_count = 0
entry.errored_count = 0
entry.duplicate_count = 0
entry.total_count = 0
entry_map[ingest.strike][entry_datetime] = entry
entry = entry_map[ingest.strike][entry_datetime]
self._update_metrics(entry_datetime, ingest, entry)
# Save the new metrics to the database
for entry in entry_map:
for entry_time in entry_map[entry]:
self._replace_entries(entry_time, entry, [entry_map[entry][entry_time]])
def get_metrics_type(self, include_choices=False):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_metrics_type`."""
# Create the metrics type definition
metrics_type = MetricsType('ingests', 'Ingests', 'Metrics for ingests grouped by strike process.')
metrics_type.filters = [MetricsTypeFilter('name', 'string')]
metrics_type.groups = MetricsIngest.GROUPS
metrics_type.set_columns(MetricsIngest, PLOT_FIELD_TYPES)
# Optionally include all the possible strike choices
if include_choices:
metrics_type.choices = Strike.objects.all()
return metrics_type
def get_plot_data(self, started=None, ended=None, choice_ids=None, columns=None):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_plot_data`."""
# Fetch all the matching ingest metrics based on query filters
entries = MetricsIngest.objects.all().order_by('occurred')
if started:
entries = entries.filter(occurred__gte=started)
if ended:
entries = entries.filter(occurred__lte=ended)
if choice_ids:
entries = entries.filter(strike_id__in=choice_ids)
if not columns:
columns = self.get_metrics_type().columns
column_names = [c.name for c in columns]
entries = entries.values('strike_id', 'occurred', *column_names)
# Convert the database models to plot models
return MetricsPlotData.create(entries, 'occurred', 'strike_id', choice_ids, columns)
def _update_metrics(self, date, ingest, entry):
"""Updates the metrics model attributes for a single ingest.
:param date: The date when ingests associated with the metrics ended.
:type date: datetime.date
:param ingest: The ingest from which to derive statistics.
:type ingest: :class:`ingest.models.Ingest`
:param entry: The metrics model to update.
:type entry: :class:`metrics.models.MetricsIngest`
"""
if ingest.status == 'DEFERRED':
entry.deferred_count += 1
entry.total_count += 1
elif ingest.status == 'INGESTED':
entry.ingested_count += 1
entry.total_count += 1
elif ingest.status == 'ERRORED':
entry.errored_count += 1
entry.total_count += 1
elif ingest.status == 'DUPLICATE':
entry.duplicate_count += 1
entry.total_count += 1
# Update file size metrics
if ingest.file_size:
entry._file_count = (entry._file_count if hasattr(entry, '_file_count') else 0) + 1
entry.file_size_sum = (entry.file_size_sum or 0) + ingest.file_size
entry.file_size_min = min(entry.file_size_min or sys.maxint, ingest.file_size)
entry.file_size_max = max(entry.file_size_max or 0, ingest.file_size)
entry.file_size_avg = entry.file_size_sum / entry._file_count
# Update elapsed transfer time metrics
if ingest.transfer_started and ingest.transfer_ended:
transfer_secs = max((ingest.transfer_ended - ingest.transfer_started).total_seconds(), 0)
entry._transfer_count = (entry._transfer_count if hasattr(entry, '_transfer_count') else 0) + 1
entry.transfer_time_sum = (entry.transfer_time_sum or 0) + transfer_secs
entry.transfer_time_min = min(entry.transfer_time_min or sys.maxint, transfer_secs)
entry.transfer_time_max = max(entry.transfer_time_max or 0, transfer_secs)
entry.transfer_time_avg = entry.transfer_time_sum / entry._transfer_count
# Update elapsed ingest time metrics
if ingest.status == 'INGESTED' and ingest.ingest_started and ingest.ingest_ended:
ingest_secs = max((ingest.ingest_ended - ingest.ingest_started).total_seconds(), 0)
entry._ingest_count = (entry._ingest_count if hasattr(entry, '_ingest_count') else 0) + 1
entry.ingest_time_sum = (entry.ingest_time_sum or 0) + ingest_secs
entry.ingest_time_min = min(entry.ingest_time_min or sys.maxint, ingest_secs)
entry.ingest_time_max = max(entry.ingest_time_max or 0, ingest_secs)
entry.ingest_time_avg = entry.ingest_time_sum / entry._ingest_count
return entry
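    # Illustrative sketch of the running-average bookkeeping above (hypothetical
    # numbers): after ingesting files of 10, 20 and 60 bytes in the same hour,
    # file_size_sum is 90, file_size_min is 10, file_size_max is 60 and
    # file_size_avg is 90 / 3 = 30. The private _file_count attribute only exists
    # while the entry is being built and is never persisted.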
@transaction.atomic
def _replace_entries(self, date, strike, entries):
"""Replaces all the existing metric entries for the given date with new ones.
:param date: The date when ingests associated with the metrics ended.
        :type date: datetime.date
        :param strike: The Strike process associated with the metrics.
        :type strike: :class:`ingest.models.Strike`
        :param entries: The new metrics models to save.
:type entries: list[:class:`metrics.models.MetricsIngest`]
"""
# Delete all the previous metrics entries
MetricsIngest.objects.filter(occurred=date, strike=strike).delete()
# Save all the new metrics models
MetricsIngest.objects.bulk_create(entries)
class MetricsIngest(models.Model):
"""Tracks all the ingest metrics grouped by strike process.
:keyword strike: The strike process associated with these metrics.
:type strike: :class:`django.db.models.ForeignKey`
:keyword occurred: The date when the ingests included in this model were ended.
    :type occurred: :class:`django.db.models.DateTimeField`
:keyword deferred_count: The total number of deferred ingests.
:type deferred_count: :class:`metrics.models.PlotBigIntegerField`
:keyword ingested_count: The total number of successfully completed ingests.
:type ingested_count: :class:`metrics.models.PlotBigIntegerField`
:keyword errored_count: The total number of failed ingests.
:type errored_count: :class:`metrics.models.PlotBigIntegerField`
:keyword duplicate_count: The total number of duplicated ingests.
:type duplicate_count: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_sum: The total size of ingested files in bytes.
:type file_size_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_min: The minimum size of ingested files in bytes.
:type file_size_min: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_max: The maximum size of ingested files in bytes.
:type file_size_max: :class:`metrics.models.PlotBigIntegerField`
:keyword file_size_avg: The average size of ingested files in bytes.
:type file_size_avg: :class:`metrics.models.PlotBigIntegerField`
:keyword transfer_time_sum: The total time spent transferring ingested files in seconds.
:type transfer_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword transfer_time_min: The minimum time spent transferring ingested files in seconds.
:type transfer_time_min: :class:`metrics.models.PlotIntegerField`
:keyword transfer_time_max: The maximum time spent transferring ingested files in seconds.
:type transfer_time_max: :class:`metrics.models.PlotIntegerField`
:keyword transfer_time_avg: The average time spent transferring ingested files in seconds.
:type transfer_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword ingest_time_sum: The total time spent ingesting files in seconds.
:type ingest_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword ingest_time_min: The minimum time spent ingesting files in seconds.
:type ingest_time_min: :class:`metrics.models.PlotIntegerField`
:keyword ingest_time_max: The maximum time spent ingesting files in seconds.
:type ingest_time_max: :class:`metrics.models.PlotIntegerField`
:keyword ingest_time_avg: The average time spent ingesting files in seconds.
:type ingest_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword created: When the model was first created.
:type created: :class:`django.db.models.DateTimeField`
"""
GROUPS = [
MetricsTypeGroup('overview', 'Overview', 'Overall counts based on ingest status.'),
MetricsTypeGroup('file_size', 'File Size', 'Size information about ingested files.'),
MetricsTypeGroup('transfer_time', 'Transfer Time', 'When files were being transferred before ingest.'),
MetricsTypeGroup('ingest_time', 'Ingest Time', 'When files were processed during ingest.'),
]
strike = models.ForeignKey('ingest.Strike', on_delete=models.PROTECT)
occurred = models.DateTimeField(db_index=True)
deferred_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files deferred (ignored) by the ingest process.',
null=True, units='count', verbose_name='Deferred Count')
ingested_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files successfully ingested.', null=True, units='count',
verbose_name='Ingested Count')
errored_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files that failed to ingest.', null=True, units='count',
verbose_name='Errored Count')
duplicate_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of files that were duplicates of previous ingests.',
null=True, units='count', verbose_name='Duplicate Count')
total_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of deferred, ingested, errored, and duplicate ingests.',
null=True, units='count', verbose_name='Total Count')
file_size_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='file_size',
help_text='Total size of ingested files.', null=True, units='bytes',
verbose_name='File Size (Sum)')
file_size_min = PlotBigIntegerField(aggregate='min', blank=True, group='file_size',
help_text='Minimum size of ingested files.', null=True, units='bytes',
verbose_name='File Size (Min)')
file_size_max = PlotBigIntegerField(aggregate='max', blank=True, group='file_size',
help_text='Maximum size of ingested files.',
null=True, units='bytes', verbose_name='File Size (Max)')
file_size_avg = PlotBigIntegerField(aggregate='avg', blank=True, group='file_size',
help_text='Average size of ingested files.', null=True,
units='bytes', verbose_name='File Size (Avg)')
transfer_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='transfer_time',
help_text='Total time spent transferring files before ingest.', null=True,
units='seconds', verbose_name='Transfer Time (Sum)')
transfer_time_min = PlotIntegerField(aggregate='min', blank=True, group='transfer_time',
help_text='Minimum time spent transferring files before ingest.', null=True,
units='seconds', verbose_name='Transfer Time (Min)')
transfer_time_max = PlotIntegerField(aggregate='max', blank=True, group='transfer_time',
help_text='Maximum time spent transferring files before ingest.', null=True,
units='seconds', verbose_name='Transfer Time (Max)')
transfer_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='transfer_time',
help_text='Average time spent transferring files before ingest.',
null=True, units='seconds', verbose_name='Transfer Time (Avg)')
ingest_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='ingest_time',
help_text='Total time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Sum)')
ingest_time_min = PlotIntegerField(aggregate='min', blank=True, group='ingest_time',
help_text='Minimum time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Min)')
ingest_time_max = PlotIntegerField(aggregate='max', blank=True, group='ingest_time',
help_text='Maximum time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Max)')
ingest_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='ingest_time',
help_text='Average time spent processing files during ingest.',
null=True, units='seconds', verbose_name='Ingest Time (Avg)')
created = models.DateTimeField(auto_now_add=True)
objects = MetricsIngestManager()
class Meta(object):
"""meta information for the db"""
db_table = 'metrics_ingest'
class MetricsJobTypeManager(models.Manager):
"""Provides additional methods for computing daily job type metrics."""
def calculate(self, date):
"""See :meth:`metrics.registry.MetricsTypeProvider.calculate`."""
started = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=timezone.utc)
ended = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=timezone.utc)
# Fetch all the jobs relevant for metrics
jobs = Job.objects.filter(status__in=['CANCELED', 'COMPLETED', 'FAILED'], ended__gte=started, ended__lte=ended)
jobs = jobs.select_related('job_type', 'error').defer('input', 'output')
# Calculate the overall counts based on job status
entry_map = {}
for job in jobs.iterator():
occurred_datetime = job.ended if job.ended else date
entry_date_time = datetime.datetime(occurred_datetime.year, occurred_datetime.month, occurred_datetime.day,
occurred_datetime.hour, tzinfo=timezone.utc)
if job.job_type not in entry_map:
entry_map[job.job_type] = {}
if entry_date_time not in entry_map[job.job_type]:
entry = MetricsJobType(job_type=job.job_type, occurred=entry_date_time, created=timezone.now())
entry.completed_count = 0
entry.failed_count = 0
entry.canceled_count = 0
entry.total_count = 0
entry.error_system_count = 0
entry.error_data_count = 0
entry.error_algorithm_count = 0
entry_map[job.job_type][entry_date_time] = entry
entry = entry_map[job.job_type][entry_date_time]
self._update_counts(occurred_datetime, job, entry)
# Fetch all the completed job executions for the requested day
job_exe_ends = JobExecutionEnd.objects.filter(status__in=['COMPLETED'], ended__gte=started, ended__lte=ended)
job_exe_ends = job_exe_ends.select_related('job_type')
# Calculate the metrics per job execution grouped by job type
for job_exe_end in job_exe_ends.iterator():
entry = entry_map[job_exe_end.job.job_type]
for entry_time in entry:
self._update_times(entry_time, job_exe_end, entry[entry_time])
# Save the new metrics to the database
for entry in entry_map:
for entry_time in entry_map[entry]:
self._replace_entries(entry_time, entry, [entry_map[entry][entry_time]])
def get_metrics_type(self, include_choices=False):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_metrics_type`."""
# Create the metrics type definition
metrics_type = MetricsType('job-types', 'Job Types', 'Metrics for jobs and executions grouped by job type.')
metrics_type.filters = [MetricsTypeFilter('name', 'string'), MetricsTypeFilter('version', 'string')]
metrics_type.groups = MetricsJobType.GROUPS
metrics_type.set_columns(MetricsJobType, PLOT_FIELD_TYPES)
# Optionally include all the possible job type choices
if include_choices:
metrics_type.choices = JobType.objects.all()
return metrics_type
def get_plot_data(self, started=None, ended=None, choice_ids=None, columns=None):
"""See :meth:`metrics.registry.MetricsTypeProvider.get_plot_data`."""
# Fetch all the matching job type metrics based on query filters
entries = MetricsJobType.objects.all().order_by('occurred')
if started:
entries = entries.filter(occurred__gte=started)
if ended:
entries = entries.filter(occurred__lte=ended)
if choice_ids:
entries = entries.filter(job_type_id__in=choice_ids)
if not columns:
columns = self.get_metrics_type().columns
column_names = [c.name for c in columns]
entries = entries.values('job_type_id', 'occurred', *column_names)
# Convert the database models to plot models
return MetricsPlotData.create(entries, 'occurred', 'job_type_id', choice_ids, columns)
def _update_counts(self, date, job, entry):
"""Updates the metrics model attributes for a single job.
:param date: The date when jobs associated with the metrics ended.
:type date: datetime.date
:param job: The job from which to derive statistics.
:type job: :class:`job.models.Job`
:param entry: The metrics model to update.
:type entry: :class:`metrics.models.MetricsJobType`
"""
if job.status == 'COMPLETED':
entry.completed_count += 1
entry.total_count += 1
elif job.status == 'FAILED':
entry.failed_count += 1
entry.total_count += 1
elif job.status == 'CANCELED':
entry.canceled_count += 1
entry.total_count += 1
if job.error:
if job.error.category == 'SYSTEM':
entry.error_system_count += 1
elif job.error.category == 'DATA':
entry.error_data_count += 1
elif job.error.category == 'ALGORITHM':
entry.error_algorithm_count += 1
def _update_times(self, date, job_exe_end, entry):
"""Updates the metrics model attributes for a single job execution.
:param date: The date when job executions associated with the metrics ended.
:type date: datetime.date
:param job_exe_end: The job execution from which to derive statistics.
:type job_exe_end: :class:`job.models.JobExecutionEnd`
:param entry: The metrics model to update.
:type entry: :class:`metrics.models.MetricsJobType`
"""
entry_count = entry.completed_count if entry.completed_count > 0 else entry.total_count
# Update elapsed queue time metrics
queue_secs = None
if job_exe_end.queued and job_exe_end.started:
queue_secs = max((job_exe_end.started - job_exe_end.queued).total_seconds(), 0)
entry.queue_time_sum = (entry.queue_time_sum or 0) + queue_secs
entry.queue_time_min = min(entry.queue_time_min or sys.maxint, queue_secs)
entry.queue_time_max = max(entry.queue_time_max or 0, queue_secs)
if entry_count:
entry.queue_time_avg = entry.queue_time_sum / entry_count
task_results = job_exe_end.get_task_results()
pull_secs = None
pull_task_length = task_results.get_task_run_length('pull')
if pull_task_length:
pull_secs = max(pull_task_length.total_seconds(), 0)
# Update elapsed pre-task time metrics
pre_secs = None
pre_task_length = task_results.get_task_run_length('pre')
if pre_task_length:
pre_secs = max(pre_task_length.total_seconds(), 0)
entry.pre_time_sum = (entry.pre_time_sum or 0) + pre_secs
entry.pre_time_min = min(entry.pre_time_min or sys.maxint, pre_secs)
entry.pre_time_max = max(entry.pre_time_max or 0, pre_secs)
if entry_count:
entry.pre_time_avg = entry.pre_time_sum / entry_count
# Update elapsed actual job time metrics
job_secs = None
job_task_length = task_results.get_task_run_length('main')
if job_task_length:
job_secs = max(job_task_length.total_seconds(), 0)
entry.job_time_sum = (entry.job_time_sum or 0) + job_secs
entry.job_time_min = min(entry.job_time_min or sys.maxint, job_secs)
entry.job_time_max = max(entry.job_time_max or 0, job_secs)
if entry_count:
entry.job_time_avg = entry.job_time_sum / entry_count
# Update elapsed post-task time metrics
post_secs = None
post_task_length = task_results.get_task_run_length('post')
if post_task_length:
post_secs = max(post_task_length.total_seconds(), 0)
entry.post_time_sum = (entry.post_time_sum or 0) + post_secs
entry.post_time_min = min(entry.post_time_min or sys.maxint, post_secs)
entry.post_time_max = max(entry.post_time_max or 0, post_secs)
if entry_count:
entry.post_time_avg = entry.post_time_sum / entry_count
# Update elapsed overall run and stage time metrics
if job_exe_end.started and job_exe_end.ended:
run_secs = max((job_exe_end.ended - job_exe_end.started).total_seconds(), 0)
entry.run_time_sum = (entry.run_time_sum or 0) + run_secs
entry.run_time_min = min(entry.run_time_min or sys.maxint, run_secs)
entry.run_time_max = max(entry.run_time_max or 0, run_secs)
if entry_count:
entry.run_time_avg = entry.run_time_sum / entry_count
stage_secs = max(run_secs - ((pull_secs or 0) + (pre_secs or 0) + (job_secs or 0) + (post_secs or 0)), 0)
entry.stage_time_sum = (entry.stage_time_sum or 0) + stage_secs
entry.stage_time_min = min(entry.stage_time_min or sys.maxint, stage_secs)
entry.stage_time_max = max(entry.stage_time_max or 0, stage_secs)
if entry_count:
entry.stage_time_avg = entry.stage_time_sum / entry_count
return entry
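    # Illustrative sketch of the stage time computation above (hypothetical numbers):
    # for an execution that ran for 100 seconds with a 10 second pull task, a
    # 5 second pre task, a 70 second job task and a 5 second post task, the system
    # overhead recorded as stage time is 100 - (10 + 5 + 70 + 5) = 10 seconds.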
@transaction.atomic
def _replace_entries(self, date, job_type, entries):
"""Replaces all the existing metric entries for the given date with new ones.
:param date: The date when job executions associated with the metrics ended.
        :type date: datetime.date
        :param job_type: The job type associated with the metrics.
        :type job_type: :class:`job.models.JobType`
        :param entries: The new metrics models to save.
:type entries: list[:class:`metrics.models.MetricsJobType`]
"""
# Delete all the previous metrics entries
MetricsJobType.objects.filter(occurred=date, job_type=job_type).delete()
# Save all the new metrics models
MetricsJobType.objects.bulk_create(entries)
class MetricsJobType(models.Model):
"""Tracks all the job execution metrics grouped by job type.
:keyword job_type: The type of job associated with these metrics.
:type job_type: :class:`django.db.models.ForeignKey`
:keyword occurred: The date when the job executions included in this model were ended.
    :type occurred: :class:`django.db.models.DateTimeField`
:keyword completed_count: The total number of completed job executions.
:type completed_count: :class:`metrics.models.PlotBigIntegerField`
:keyword failed_count: The total number of failed job executions.
:type failed_count: :class:`metrics.models.PlotBigIntegerField`
:keyword canceled_count: The total number of canceled job executions.
:type canceled_count: :class:`metrics.models.PlotBigIntegerField`
:keyword total_count: The total number of ended job executions (completed, failed, canceled).
:type total_count: :class:`metrics.models.PlotBigIntegerField`
:keyword error_system_count: The number of failed job executions due to a system error.
:type error_system_count: :class:`metrics.models.PlotBigIntegerField`
:keyword error_data_count: The number of failed job executions due to a data error.
:type error_data_count: :class:`metrics.models.PlotBigIntegerField`
:keyword error_algorithm_count: The number of failed job executions due to an algorithm error.
:type error_algorithm_count: :class:`metrics.models.PlotBigIntegerField`
:keyword queue_time_sum: The total time job executions were queued in seconds.
:type queue_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword queue_time_min: The minimum time a job execution was queued in seconds.
:type queue_time_min: :class:`metrics.models.PlotIntegerField`
:keyword queue_time_max: The maximum time a job execution was queued in seconds.
:type queue_time_max: :class:`metrics.models.PlotIntegerField`
:keyword queue_time_avg: The average time job executions were queued in seconds.
:type queue_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword pre_time_sum: The total time job executions were executing pre-task steps in seconds.
:type pre_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword pre_time_min: The minimum time a job execution was executing pre-task steps in seconds.
:type pre_time_min: :class:`metrics.models.PlotIntegerField`
:keyword pre_time_max: The maximum time a job execution was executing pre-task steps in seconds.
:type pre_time_max: :class:`metrics.models.PlotIntegerField`
:keyword pre_time_avg: The average time job executions were executing pre-task steps in seconds.
:type pre_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword job_time_sum: The total time job executions were executing the actual job task in seconds.
:type job_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword job_time_min: The minimum time a job execution was executing the actual job task in seconds.
:type job_time_min: :class:`metrics.models.PlotIntegerField`
:keyword job_time_max: The maximum time a job execution was executing the actual job task in seconds.
:type job_time_max: :class:`metrics.models.PlotIntegerField`
:keyword job_time_avg: The average time job executions were executing the actual job task in seconds.
:type job_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword post_time_sum: The total time job executions were executing post-task steps in seconds.
:type post_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword post_time_min: The minimum time a job execution was executing post-task steps in seconds.
:type post_time_min: :class:`metrics.models.PlotIntegerField`
:keyword post_time_max: The maximum time a job execution was executing post-task steps in seconds.
:type post_time_max: :class:`metrics.models.PlotIntegerField`
:keyword post_time_avg: The average time job executions were executing post-task steps in seconds.
:type post_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword run_time_sum: The total time job executions were running in seconds.
:type run_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword run_time_min: The minimum time a job execution was running in seconds.
:type run_time_min: :class:`metrics.models.PlotIntegerField`
:keyword run_time_max: The maximum time a job execution was running in seconds.
:type run_time_max: :class:`metrics.models.PlotIntegerField`
:keyword run_time_avg: The average time job executions were running in seconds.
:type run_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword stage_time_sum: The total time job executions spent in system staging between tasks in seconds.
:type stage_time_sum: :class:`metrics.models.PlotBigIntegerField`
:keyword stage_time_min: The minimum time a job execution spent in system staging between tasks in seconds.
:type stage_time_min: :class:`metrics.models.PlotIntegerField`
:keyword stage_time_max: The maximum time a job execution spent in system staging between tasks in seconds.
:type stage_time_max: :class:`metrics.models.PlotIntegerField`
:keyword stage_time_avg: The average time job executions spent in system staging between tasks in seconds.
:type stage_time_avg: :class:`metrics.models.PlotIntegerField`
:keyword created: When the model was first created.
:type created: :class:`django.db.models.DateTimeField`
"""
GROUPS = [
MetricsTypeGroup('overview', 'Overview', 'Overall counts based on job status.'),
MetricsTypeGroup('errors', 'Errors', 'Overall error counts based on category.'),
MetricsTypeGroup('queue_time', 'Queue Time', 'When jobs were in the queue.'),
MetricsTypeGroup('pre_time', 'Pre-task Time', 'When jobs were being prepared.'),
MetricsTypeGroup('job_time', 'Job Task Time', 'When jobs were executing their actual goal.'),
MetricsTypeGroup('post_time', 'Post-task Time', 'When jobs were being cleaned up.'),
MetricsTypeGroup('run_time', 'Run Time', 'When related tasks were run (pre, job, post).'),
MetricsTypeGroup('stage_time', 'Stage Time', 'Times related to the overhead of the system.'),
]
job_type = models.ForeignKey('job.JobType', on_delete=models.PROTECT)
occurred = models.DateTimeField(db_index=True)
completed_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of successfully completed jobs.', null=True, units='count',
verbose_name='Completed Count')
failed_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of incomplete failed jobs.', null=True, units='count',
verbose_name='Failed Count')
canceled_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of incomplete canceled jobs.', null=True, units='count',
verbose_name='Canceled Count')
total_count = PlotBigIntegerField(aggregate='sum', blank=True, group='overview',
help_text='Number of completed, failed, and canceled jobs.', null=True,
units='count', verbose_name='Total Count')
error_system_count = PlotBigIntegerField(aggregate='sum', blank=True, group='errors',
help_text='Number of failed jobs due to a system error.', null=True,
units='count', verbose_name='System Error Count')
error_data_count = PlotBigIntegerField(aggregate='sum', blank=True, group='errors',
help_text='Number of failed jobs due to a data error.', null=True,
units='count', verbose_name='Data Error Count')
error_algorithm_count = PlotBigIntegerField(aggregate='sum', blank=True, group='errors',
help_text='Number of failed jobs due to an algorithm error.', null=True,
units='count', verbose_name='Algorithm Error Count')
queue_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='queue_time',
help_text='Total time the job waited in the queue.', null=True,
units='seconds', verbose_name='Queue Time (Sum)')
queue_time_min = PlotIntegerField(aggregate='min', blank=True, group='queue_time',
help_text='Minimum time the job waited in the queue.', null=True, units='seconds',
verbose_name='Queue Time (Min)')
queue_time_max = PlotIntegerField(aggregate='max', blank=True, group='queue_time',
help_text='Maximum time the job waited in the queue.',
null=True, units='seconds', verbose_name='Queue Time (Max)')
queue_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='queue_time',
help_text='Average time the job waited in the queue.', null=True,
units='seconds', verbose_name='Queue Time (Avg)')
pre_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='pre_time',
help_text='Total time spent preparing the job task.', null=True, units='seconds',
verbose_name='Pre-task Time (Sum)')
pre_time_min = PlotIntegerField(aggregate='min', blank=True, group='pre_time',
help_text='Minimum time spent preparing the job task.', null=True, units='seconds',
verbose_name='Pre-task Time (Min)')
pre_time_max = PlotIntegerField(aggregate='max', blank=True, group='pre_time',
help_text='Maximum time spent preparing the job task.', null=True, units='seconds',
verbose_name='Pre-task Time (Max)')
pre_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='pre_time',
help_text='Average time spent preparing the job task.',
null=True, units='seconds', verbose_name='Pre-task Time (Avg)')
job_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='job_time',
help_text='Total time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Sum)')
job_time_min = PlotIntegerField(aggregate='min', blank=True, group='job_time',
help_text='Minimum time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Min)')
job_time_max = PlotIntegerField(aggregate='max', blank=True, group='job_time',
help_text='Maximum time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Max)')
job_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='job_time',
help_text='Average time spent running the job task.',
null=True, units='seconds', verbose_name='Job Task Time (Avg)')
post_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='post_time',
help_text='Total time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Sum)')
post_time_min = PlotIntegerField(aggregate='min', blank=True, group='post_time',
help_text='Minimum time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Min)')
post_time_max = PlotIntegerField(aggregate='max', blank=True, group='post_time',
help_text='Maximum time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Max)')
post_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='post_time',
help_text='Average time spent finalizing the job task.',
null=True, units='seconds', verbose_name='Post-task Time (Avg)')
run_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='run_time',
help_text='Total time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Sum)')
run_time_min = PlotIntegerField(aggregate='min', blank=True, group='run_time',
help_text='Minimum time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Min)')
run_time_max = PlotIntegerField(aggregate='max', blank=True, group='run_time',
help_text='Maximum time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Max)')
run_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='run_time',
help_text='Average time spent running the pre, job, and post tasks.',
null=True, units='seconds', verbose_name='Run Time (Avg)')
stage_time_sum = PlotBigIntegerField(aggregate='sum', blank=True, group='stage_time',
help_text='Total overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Sum)')
stage_time_min = PlotIntegerField(aggregate='min', blank=True, group='stage_time',
help_text='Minimum overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Min)')
stage_time_max = PlotIntegerField(aggregate='max', blank=True, group='stage_time',
help_text='Maximum overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Max)')
stage_time_avg = PlotIntegerField(aggregate='avg', blank=True, group='stage_time',
help_text='Average overhead time spent managing tasks.',
null=True, units='seconds', verbose_name='Stage Time (Avg)')
created = models.DateTimeField(auto_now_add=True)
objects = MetricsJobTypeManager()
class Meta(object):
"""meta information for the db"""
db_table = 'metrics_job_type'
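# A minimal usage sketch, assuming the enclosing model class is named
# MetricsJobType (it exposes MetricsJobTypeManager as `objects` and maps to the
# 'metrics_job_type' table) and that `from django.db import models` appears
# earlier in this file, as the field definitions above imply. The function name
# and arguments are illustrative; only the field names come from the model.
def summarize_job_type_metrics(job_type_id, started, ended):
    """Roll up plot fields for one job type over an inclusive date range."""
    return MetricsJobType.objects.filter(
        job_type_id=job_type_id,    # lookup against the job_type foreign key
        occurred__gte=started,      # start of the reporting window
        occurred__lte=ended,        # end of the reporting window
    ).aggregate(
        total_completed=models.Sum('completed_count'),
        total_failed=models.Sum('failed_count'),
        total_canceled=models.Sum('canceled_count'),
        shortest_queue_time=models.Min('queue_time_min'),
        longest_run_time=models.Max('run_time_max'),
        average_job_time=models.Avg('job_time_avg'),
    )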
| apache-2.0 | 2,971,459,741,209,194,500 | 56.151339 | 120 | 0.631719 | false |
lgarren/spack | var/spack/repos/builtin/packages/perl-test-needs/package.py | 1 | 1580 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlTestNeeds(PerlPackage):
"""Skip tests when modules not available."""
homepage = "http://search.cpan.org/~haarg/Test-Needs-0.002005/lib/Test/Needs.pm"
url = "http://search.cpan.org/CPAN/authors/id/H/HA/HAARG/Test-Needs-0.002005.tar.gz"
version('0.002005', '356634a56c99282e8059f290f5d534c8')
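# A brief usage note, not taken from the recipe itself: with this package in a
# Spack repository it could typically be installed with `spack install perl-test-needs`,
# and another Perl recipe might pull it in as a dependency, for example:
#
#     depends_on('perl-test-needs', type=('build', 'run'))
#
# The dependency type here is an assumption about how a consuming package would use it.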
| lgpl-2.1 | 6,385,339,840,585,021,000 | 45.470588 | 93 | 0.679114 | false |