content (stringlengths 7 to 928k) | avg_line_length (float64 3.5 to 33.8k) | max_line_length (int64 6 to 139k) | alphanum_fraction (float64 0.08 to 0.96) | licenses (sequence) | repository_name (stringlengths 7 to 104) | path (stringlengths 4 to 230) | size (int64 7 to 928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
# Generated by Django 3.1.7 on 2021-03-10 03:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Admins', '0036_auto_20210310_0337'),
]
operations = [
migrations.AlterField(
model_name='createpractioner',
name='id',
field=models.CharField(default='P27fc1', editable=False, max_length=6, primary_key=True, serialize=False),
),
]
| 24.157895 | 118 | 0.631808 | [
"MIT"
] | sd2001/Rethink-Backend | Admins/migrations/0037_auto_20210310_0337.py | 459 | Python |
import random
def HiringProblem(score, n):
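    # Optimal-stopping ("secretary problem") heuristic: reject the first n/e
    # candidates, remember the best score among them as a benchmark, then hire
    # the first later candidate whose score meets or beats that benchmark.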
sample_size = int(round(n / e))
print(f"\nRejecting first {sample_size} candidates as sample")
#finding best candidate in the sample set for benchmark
    best_candidate = 0
for i in range(1, sample_size):
if (score[i] > score[best_candidate]):
best_candidate = i
#finding the first best candidate outside the sample set
for i in range(sample_size, n):
if (score[i] >= score[best_candidate]):
best_candidate = i
break
    if best_candidate >= sample_size:
print(f"\nThe best Candidate found is {best_candidate+1} with score {score[best_candidate]}")
else:
print("Couldn't find a best candidate")
# Driver code
if __name__ == "__main__":
e = 2.71828
n = int(input("Enter number of candidates to simulate\n")) #total number of candidate
score = []
#populating the list
for i in range(n):
score.append(random.randint(1, n))
print("Candidate\tScore\n");
for i in range(n):
print(f"{i+1}\t\t{score[i]}");
HiringProblem(score, n);
| 29.390244 | 102 | 0.591701 | [
"Apache-2.0"
] | STreK7/MSc.-CS | Semester I/Design and Analysis of Algorithm/Practical 04- Hiring Problem/HiringProblem.py | 1,205 | Python |
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import collections
import re
import sys
from template import util
from template.constants import *
from template.directive import Directive
from template.grammar import Grammar
from template.util import TemplateException
"""
template.parser - LALR(1) parser for compiling template documents
SYNOPSIS
import template.parser
parser = template.parser.Parser(config)
template = parser.parse(text)
DESCRIPTION
The template.parser module implements a LALR(1) parser and associated
methods for parsing template documents into Python code.
PUBLIC METHODS
__init__(params)
The constructor initializes a new template.parser.Parser object. A
dictionary may be supplied as a parameter to provide configuration
values. These may include:
* START_TAG, END_TAG
The START_TAG and END_TAG options are used to specify character
sequences or regular expressions that mark the start and end of a
template directive. The default values for START_TAG and END_TAG are
'[%' and '%]' respectively, giving us the familiar directive style:
[% example %]
Any Python regex characters can be used and therefore should be
escaped (or use the re.escape function) if they are intended to
represent literal characters.
parser = template.parser.Parser({
'START_TAG': re.escape('<+'),
'END_TAG': re.escape('+>'),
})
example:
<+ INCLUDE foobar +>
The TAGS directive can also be used to set the START_TAG and END_TAG values
on a per-template file basis.
[% TAGS <+ +> %]
* TAG_STYLE
The TAG_STYLE option can be used to set both START_TAG and END_TAG
according to pre-defined tag styles.
parser = template.parser.Parser({
'TAG_STYLE': 'star',
})
Available styles are:
template [% ... %] (default)
template1 [% ... %] or %% ... %% (TT version 1)
metatext %% ... %% (Text::MetaText)
star [* ... *] (TT alternate)
php <? ... ?> (PHP)
asp <% ... %> (ASP)
mason <% ... > (HTML::Mason)
html <!-- ... --> (HTML comments)
Any values specified for START_TAG and/or END_TAG will over-ride those
defined by a TAG_STYLE.
The TAGS directive may also be used to set a TAG_STYLE
[% TAGS html %]
<!-- INCLUDE header -->
* PRE_CHOMP, POST_CHOMP
Anything outside a directive tag is considered plain text and is
generally passed through unaltered (but see the INTERPOLATE option).
This includes all whitespace and newline characters surrounding
directive tags. Directives that don't generate any output will leave
gaps in the output document.
Example:
Foo
[% a = 10 %]
Bar
Output:
Foo
Bar
The PRE_CHOMP and POST_CHOMP options can help to clean up some of this
extraneous whitespace. Both are disabled by default.
parser = template.parser.Parser({
'PRE_CHOMP': 1,
'POST_CHOMP': 1,
})
With PRE_CHOMP set to 1, the newline and whitespace preceding a
directive at the start of a line will be deleted. This has the effect
of concatenating a line that starts with a directive onto the end of
the previous line.
    Foo <----------.
                   |
    ,-(PRE_CHOMP)--'
    |
    `-- [% a = 10 %] --.
                       |
        ,-(POST_CHOMP)-'
        |
        `-> Bar
With POST_CHOMP set to 1, any whitespace after a directive up to and
including the newline will be deleted. This has the effect of joining
a line that ends with a directive onto the start of the next line.
If PRE_CHOMP or POST_CHOMP is set to 2, all whitespace including any
number of newlines will be removed and replaced with a single space.
This is useful for HTML, where (usually) a contiguous block of
whitespace is rendered the same as a single space.
With PRE_CHOMP or POST_CHOMP set to 3, all adjacent whitespace
(including newlines) will be removed entirely.
These values are defined as CHOMP_NONE, CHOMP_ONE, CHOMP_COLLAPSE and
CHOMP_GREEDY constants in the template.constants module. CHOMP_ALL
is also defined as an alias for CHOMP_ONE to provide backwards
compatibility with earlier versions of the Template Toolkit.
Additionally the chomp tag modifiers listed below may also be used for
the PRE_CHOMP and POST_CHOMP configuration.
tt = template.Template({
'PRE_CHOMP': '~',
'POST_CHOMP': '-',
})
PRE_CHOMP and POST_CHOMP can be activated for individual directives by
placing a '-' immediately at the start and/or end of the directive.
[% FOREACH user IN userlist %]
[%- user -%]
[% END %]
This has the same effect as CHOMP_ONE in removing all whitespace
before or after the directive up to and including the newline. The
template will be processed as if written:
[% FOREACH user IN userlist %][% user %][% END %]
To remove all whitespace including any number of newlines, use the '~'
character instead.
[% FOREACH user IN userlist %]
[%~ user ~%]
[% END %]
To collapse all whitespace to a single space, use the '=' character.
[% FOREACH user IN userlist %]
[%= user =%]
[% END %]
Here the template is processed as if written:
[% FOREACH user IN userlist %] [% user %] [% END %]
If you have PRE_CHOMP or POST_CHOMP set as configuration options then
you can use '+' to disable any chomping options (i.e. leave the
whitespace intact) on a per-directive basis.
[% FOREACH user = userlist %]
User: [% user +%]
[% END %]
With POST_CHOMP set to CHOMP_ONE, the above example would be parsed as
if written:
[% FOREACH user = userlist %]User: [% user %]
[% END %]
For reference, the PRE_CHOMP and POST_CHOMP configuration options may be set to any of the following:
Constant Value Tag Modifier
----------------------------------
CHOMP_NONE 0 +
CHOMP_ONE 1 -
CHOMP_COLLAPSE 2 =
CHOMP_GREEDY 3 ~
* INTERPOLATE
The INTERPOLATE flag, when set to any true value will cause variable
references in plain text (i.e. not surrounded by START_TAG and
END_TAG) to be recognised and interpolated accordingly.
parser = template.parser.Parser({
'INTERPOLATE': 1,
})
Variables should be prefixed by a '$' to identify them. Curly braces
can be used in the familiar Perl/shell style to explicitly scope the
variable name where required.
# INTERPOLATE => 0
<a href="http://[% server %]/[% help %]">
<img src="[% images %]/help.gif"></a>
[% myorg.name %]
# INTERPOLATE => 1
<a href="http://$server/$help">
<img src="$images/help.gif"></a>
$myorg.name
# explicit scoping with { }
<img src="$images/${icon.next}.gif">
Note that a limitation in Perl's regex engine restricts the maximum
length of an interpolated template to around 32 kilobytes or possibly
less. Files that exceed this limit in size will typically cause Perl
to dump core with a segmentation fault. If you routinely process
templates of this size then you should disable INTERPOLATE or split
the templates into several smaller files or blocks which can then be
joined back together via PROCESS or INCLUDE.
It is unknown whether this limitation is shared by the Python regex
engine.
* ANYCASE
By default, directive keywords should be expressed in UPPER CASE. The
ANYCASE option can be set to allow directive keywords to be specified
in any case.
# ANYCASE => 0 (default)
[% INCLUDE foobar %] # OK
[% include foobar %] # ERROR
[% include = 10 %] # OK, 'include' is a variable
# ANYCASE => 1
[% INCLUDE foobar %] # OK
[% include foobar %] # OK
[% include = 10 %] # ERROR, 'include' is reserved word
One side-effect of enabling ANYCASE is that you cannot use a variable
of the same name as a reserved word, regardless of case. The reserved
words are currently:
GET CALL SET DEFAULT INSERT INCLUDE PROCESS WRAPPER
IF UNLESS ELSE ELSIF FOR FOREACH WHILE SWITCH CASE
USE PLUGIN FILTER MACRO PYTHON RAWPYTHON BLOCK META
TRY THROW CATCH FINAL NEXT LAST BREAK RETURN STOP
CLEAR TO STEP AND OR NOT MOD DIV END
The only lower case reserved words that cannot be used for variables,
regardless of the ANYCASE option, are the operators:
and or not mod div
* V1DOLLAR
In version 1 of the Template Toolkit, an optional leading '$' could be placed
on any template variable and would be silently ignored.
# VERSION 1
[% $foo %] === [% foo %]
[% $hash.$key %] === [% hash.key %]
To interpolate a variable value the '${' ... '}' construct was used.
Typically, one would do this to index into a hash array when the key
value was stored in a variable.
example:
vars = {
'users': {
'aba': { 'name': 'Alan Aardvark', ... },
'abw': { 'name': 'Andy Wardley', ... },
...
},
'uid': 'aba',
...
}
template.process('user/home.html', vars)
'user/home.html':
[% user = users.${uid} %] # users.aba
Name: [% user.name %] # Alan Aardvark
This was inconsistent with double quoted strings and also the
INTERPOLATE mode, where a leading '$' in text was enough to indicate a
variable for interpolation, and the additional curly braces were used
to delimit variable names where necessary. Note that this use is
consistent with UNIX and Perl conventions, among others.
# double quoted string interpolation
[% name = "$title ${user.name}" %]
# INTERPOLATE = 1
<img src="$images/help.gif"></a>
<img src="$images/${icon.next}.gif">
For version 2, these inconsistencies have been removed and the syntax
clarified. A leading '$' on a variable is now used exclusively to
indicate that the variable name should be interpolated
(e.g. substituted for its value) before being used. The earlier example
from version 1:
# VERSION 1
[% user = users.${uid} %]
Name: [% user.name %]
can now be simplified in version 2 as:
# VERSION 2
[% user = users.$uid %]
Name: [% user.name %]
The leading dollar is no longer ignored and has the same effect of
interpolation as '${' ... '}' in version 1. The curly braces may
still be used to explicitly scope the interpolated variable name
where necessary.
e.g.
[% user = users.${me.id} %]
Name: [% user.name %]
The rule applies for all variables, both within directives and in
plain text if processed with the INTERPOLATE option. This means that
you should no longer (if you ever did) add a leading '$' to a variable
inside a directive, unless you explicitly want it to be interpolated.
One obvious side-effect is that any version 1 templates with variables
using a leading '$' will no longer be processed as expected. Given
the following variable definitions,
[% foo = 'bar'
bar = 'baz'
%]
version 1 would interpret the following as:
# VERSION 1
[% $foo %] => [% GET foo %] => bar
whereas version 2 interprets it as:
# VERSION 2
[% $foo %] => [% GET $foo %] => [% GET bar %] => baz
In version 1, the '$' is ignored and the value for the variable 'foo'
is retrieved and printed. In version 2, the variable '$foo' is first
interpolated to give the variable name 'bar' whose value is then
retrieved and printed.
The use of the optional '$' has never been strongly recommended, but
to assist in backwards compatibility with any version 1 templates that
may rely on this "feature", the V1DOLLAR option can be set to 1
(default: 0) to revert the behaviour and have leading '$' characters
ignored.
    parser = template.parser.Parser({
        'V1DOLLAR': 1,
    })
* GRAMMAR
The GRAMMAR configuration item can be used to specify an alternate
grammar for the parser. This allows a modified or entirely new
template language to be constructed and used by the Template Toolkit.
Source templates are compiled to Python code by the template.parser
module using the template.grammar module (by default) to define the
language structure and semantics. Compiled templates are thus
inherently "compatible" with each other and there is nothing to prevent
any number of different template languages being compiled and used within
the same Template Toolkit processing environment (other than the usual
time and memory constraints).
The template.grammar file is constructed from a YACC-like grammar
(using Parse::YAPP) and a skeleton module template. These files are
provided, along with a small script to rebuild the grammar, in the
'parser' sub-directory of the distribution. You don't have to know or
worry about these unless you want to hack on the template language or
define your own variant. There is a README file in the same directory
which provides some small guidance but it is assumed that you know
what you're doing if you venture herein. If you grok LALR parsers,
then you should find it comfortably familiar.
By default, an instance of the default template.grammar.Grammar will
be created and used automatically if a GRAMMAR item isn't specified.
import myorg.template.grammar
parser = template.parser.Parser({
'GRAMMAR': myorg.template.grammar.Grammar(),
})
* DEBUG
The DEBUG option can be used to enable various debugging features of
the template.parser module.
from template.constants import *
tt = template.Template({
'DEBUG': DEBUG_PARSER | DEBUG_DIRS,
})
The DEBUG value can include any of the following. Multiple values
should be combined using the logical OR operator, '|'.
** DEBUG_PARSER
This flag causes the Parser to generate debugging messages that show
the Python code generated by parsing and compiling each template.
** DEBUG_DIRS
This option causes the Template Toolkit to generate comments
indicating the source file, line and original text of each directive
in the template. These comments are embedded in the template output
using the format defined in the DEBUG_FORMAT configuration item, or a
simple default format if unspecified.
For example, the following template fragment:
Hello World
would generate this output:
## input text line 1 : ##
Hello
## input text line 2 : World ##
World
parse(text)
The parse() method parses the text passed in the first parameter and
returns a dictionary of data defining the compiled representation of
the template text, suitable for passing to the
template.document.Document constructor.
Example:
data = parser.parse(text)
The data dictionary returned contains a BLOCK item containing the
compiled Python code for the template, a DEFBLOCKS item containing a
dictionary of sub-template BLOCKs defined within in the template, and
a METADATA item containing a dictionary of metadata values defined in
META tags.
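A minimal usage sketch (assuming the template.document module shipped with
this distribution; parse() returns None if the text cannot be parsed):

    from template.document import Document

    data = parser.parse(text)
    if data is not None:
        doc = Document(data)   # wraps the BLOCK, DEFBLOCKS and METADATA items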
"""
CONTINUE = 0
ACCEPT = 1
ERROR = 2
ABORT = 3
TAG_STYLE = {
"default": (r"\[%", r"%\]"),
"template1": (r"[[%]%", r"%[]%]"),
"metatext": (r"%%", r"%%"),
"html": (r"<!--", r"-->"),
"mason": (r"<%", r">"),
"asp": (r"<%", r"%>"),
"php": (r"<\?", r"\?>"),
"star": (r"\[\*", r"\*\]"),
}
TAG_STYLE["template"] = TAG_STYLE["tt2"] = TAG_STYLE["default"]
DEFAULT_STYLE = {
"START_TAG": TAG_STYLE["default"][0],
"END_TAG": TAG_STYLE["default"][1],
"ANYCASE": 0,
"INTERPOLATE": 0,
"PRE_CHOMP": 0,
"POST_CHOMP": 0,
"V1DOLLAR": 0,
"EVAL_PYTHON": 0,
}
ESCAPE = {"n": "\n", "r": "\r", "t": "\t"}
CHOMP_FLAGS = r"[-=~+]"
CHOMP_ALL = str(CHOMP_ALL)
CHOMP_COLLAPSE = str(CHOMP_COLLAPSE)
CHOMP_GREEDY = str(CHOMP_GREEDY)
CHOMP_NONE = str(CHOMP_NONE)
CHOMP_CONST = {
"-": CHOMP_ALL,
"=": CHOMP_COLLAPSE,
"~": CHOMP_GREEDY,
"+": CHOMP_NONE
}
PRE_CHOMP = {
CHOMP_ALL: lambda x: re.sub(r"(\n|^)[^\S\n]*\Z", "", x),
CHOMP_COLLAPSE: lambda x: re.sub(r"\s+\Z", " ", x),
CHOMP_GREEDY: lambda x: re.sub(r"\s+\Z", "", x),
CHOMP_NONE: lambda x: x,
}
def postchomp(regex, prefix):
regex = re.compile(regex)
def strip(text, postlines):
match = regex.match(text)
if match:
text = prefix + text[match.end():]
postlines += match.group().count("\n")
return text, postlines
return strip
POST_CHOMP = {
CHOMP_ALL: postchomp(r"[^\S\n]*\n", ""),
CHOMP_COLLAPSE: postchomp(r"\s+", " "),
CHOMP_GREEDY: postchomp(r"\s+", ""),
CHOMP_NONE: lambda x, y: (x, y),
}
def Chomp(x):
return re.sub(r"[-=~+]", lambda m: CHOMP_CONST[m.group()], str(x))
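# For example, Chomp("-") maps the '-' modifier to the CHOMP_ALL constant (as a
# string), Chomp("+") maps to CHOMP_NONE, and a plain numeric setting such as
# Chomp(1) simply passes through as "1".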
GRAMMAR = re.compile(r"""
# strip out any comments
(\#[^\n]*)
|
# a quoted string matches in $3
(["']) # $2 - opening quote, ' or "
( # $3 - quoted text buffer
(?: # repeat group (no backreference)
\\\\ # an escaped backslash
| # ...or...
\\\2 # an escaped quote \" or \' (match $1)
| # ...or...
. # any other character
| \n
)*? # non-greedy repeat
) # end of $3
\2 # match opening quote
|
# an unquoted number matches in $4
(-? \d+ (?: \. \d+ )?) # numbers
|
# filename matches in $5
( /? \w+ (?: (?: /|::? ) \w* )+ | /\w+ )
|
# an identifier matches in $6
(\w+)
|
# an unquoted word or symbol matches in $7
( [(){}\[\]:;,/\\] # misc parentheses and symbols
| -> # arrow operator (for future?)
| [-+*] # math operations
| \${? # dollar with optional left brace
| => # like "="
| [=!<>]?= | [!<>] # equality tests
| &&? | \|\|? # boolean ops
| \.\.? # n..n sequence
| \S+ # something unquoted
) # end of $7
""", re.VERBOSE)
QUOTED_STRING = re.compile(r"""
( (?: \\. | [^\$] ){1,3000} ) # escaped or non-'$' character [$1]
|
( \$ (?: # embedded variable [$2]
(?: \{ ([^\}]*) \} ) # ${ ... } [$3]
|
([\w\.]+) # $word [$4]
)
)
""", re.VERBOSE)
class Error(Exception):
"""A trivial local exception class."""
pass
class Parser:
"""This module implements a LALR(1) parser and assocated support
methods to parse template documents into the appropriate "compiled"
format.
"""
def __init__(self, param):
self.start_tag = param.get("START_TAG") or DEFAULT_STYLE["START_TAG"]
self.end_tag = param.get("END_TAG") or DEFAULT_STYLE["END_TAG"]
self.tag_style = param.get("TAG_STYLE", "default")
self.anycase = param.get("ANYCASE", False)
self.interpolate = param.get("INTERPOLATE", False)
self.pre_chomp = param.get("PRE_CHOMP", CHOMP_NONE)
self.post_chomp = param.get("POST_CHOMP", CHOMP_NONE)
self.v1dollar = param.get("V1DOLLAR", False)
self.eval_python = param.get("EVAL_PYTHON", False)
self.file_info = param.get("FILE_INFO", 1)
self.grammar = param.get("GRAMMAR", Grammar())
self.factory = param.get("FACTORY", Directive)
self.fileinfo = []
self.defblocks = []
self.defblock_stack = []
self.infor = 0
self.inwhile = 0
self.style = []
# Build a FACTORY object to include any NAMESPACE definitions,
# but only if FACTORY isn't already a (non-callable) object.
if isinstance(self.factory, collections.Callable):
self.factory = self.factory(param)
self.lextable = self.grammar.lextable
self.states = self.grammar.states
self.rules = self.grammar.rules
self.new_style(param)
self.tokenize = (
((1,), self._comment),
((2, 3), self._string),
((4,), self._number),
((5,), self._filename),
((6,), self._identifier),
((7,), self._word),
)
def new_style(self, config):
"""Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc.
"""
if self.style:
style = self.style[-1]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get("TAG_STYLE")
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if tags is None:
raise Error("Invalid tag style: %s" % tagstyle)
start, end = tags
config["START_TAG"] = config.get("START_TAG", start)
config["END_TAG"] = config.get("END_TAG", end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if value is not None:
style[key] = value
self.style.append(style)
return style
def old_style(self):
"""Pop the current parser style and revert to the previous one.
See new_style(). ** experimental **
"""
if len(self.style) <= 1:
raise Error("only 1 parser style remaining")
self.style.pop()
return self.style[-1]
def location(self):
"""Return Python comment indicating current parser file and line."""
if not self.file_info:
return "\n"
line = self.line
info = self.fileinfo[-1]
file = info and (info.path or info.name) or "(unknown template)"
line = re.sub(r"-.*", "", str(line)) # might be 'n-n'
return '#line %s "%s"\n' % (line, file)
def parse(self, text, info=None):
"""Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document.
"""
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if tokens is None:
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {"BLOCK": block,
"DEFBLOCKS": self.defblock,
"METADATA": self.metadata}
else:
return None
def split_text(self, text):
"""Split input template text into directives and raw text chunks."""
tokens = []
line = 1
style = self.style[-1]
def make_splitter(delims):
return re.compile(r"(?s)(.*?)%s(.*?)%s" % delims)
splitter = make_splitter((style["START_TAG"], style["END_TAG"]))
while True:
match = splitter.match(text)
if not match:
break
text = text[match.end():]
pre, dir = match.group(1), match.group(2)
prelines = pre.count("\n")
dirlines = dir.count("\n")
postlines = 0
if dir.startswith("#"):
                # comment out entire directive except for any end chomp flag
match = re.search(CHOMP_FLAGS + "$", dir)
if match:
dir = match.group()
else:
dir = ""
else:
# PRE_CHOMP: process whitespace before tag
match = re.match(r"(%s)?\s*" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["PRE_CHOMP"])
if match:
dir = dir[match.end():]
pre = PRE_CHOMP[chomp](pre)
# POST_CHOMP: process whitespace after tag
match = re.search(r"\s*(%s)?\s*$" % CHOMP_FLAGS, dir)
chomp = Chomp(match and match.group(1) or style["POST_CHOMP"])
if match:
dir = dir[:match.start()]
text, postlines = POST_CHOMP[chomp](text, postlines)
if pre:
if style["INTERPOLATE"]:
tokens.append([pre, line, 'ITEXT'])
else:
tokens.extend(["TEXT", pre])
line += prelines
if dir:
# The TAGS directive is a compile-time switch.
match = re.match(r"(?i)TAGS\s+(.*)", dir)
if match:
tags = re.split(r"\s+", match.group(1))
if len(tags) > 1:
splitter = make_splitter(tuple(re.escape(x) for x in tags[:2]))
elif tags[0] in TAG_STYLE:
splitter = make_splitter(TAG_STYLE[tags[0]])
else:
sys.stderr.write("Invalid TAGS style: %s" % tags[0])
else:
if dirlines > 0:
line_range = "%d-%d" % (line, line + dirlines)
else:
line_range = str(line)
tokens.append([dir, line_range, self.tokenise_directive(dir)])
line += dirlines + postlines
if text:
if style["INTERPOLATE"]:
tokens.append([text, line, "ITEXT"])
else:
tokens.extend(["TEXT", text])
return tokens
def _comment(self, token):
"""Tokenizes a comment."""
return ()
def _string(self, quote, token):
"""Tokenizes a string."""
if quote == '"':
if re.search(r"[$\\]", token):
# unescape " and \ but leave \$ escaped so that
# interpolate_text() doesn't incorrectly treat it
# as a variable reference
token = re.sub(r'\\([\\"])', r'\1', token)
token = re.sub(r'\\([^$nrt])', r'\1', token)
token = re.sub(r'\\([nrt])', lambda m: ESCAPE[m.group(1)], token)
return ['"', '"'] + self.interpolate_text(token) + ['"', '"']
else:
return "LITERAL", "scalar(%r)" % token
else:
# Remove escaped single quotes and backslashes:
token = re.sub(r"\\(.)", lambda m: m.group(m.group(1) in "'\\"), token)
return "LITERAL", "scalar(%r)" % token
def _number(self, token):
"""Tokenizes a number."""
return "NUMBER", "scalar(%s)" % token
def _filename(self, token):
"""Tokenizes a filename."""
return "FILENAME", token
def _identifier(self, token):
"""Tokenizes an identifier."""
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if toktype is not None:
return toktype, uctoken
else:
return "IDENT", token
def _word(self, token):
"""Tokenizes an unquoted word or symbol ."""
return self.lextable.get(token, "UNQUOTED"), token
def tokenise_directive(self, dirtext):
"""Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text.
"""
tokens = []
for match in GRAMMAR.finditer(dirtext):
for indices, method in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens
def _parse(self, tokens, info):
"""Parses the list of input tokens passed by reference and returns
an object which contains the compiled representation of the
template.
This is the main parser DFA loop. See embedded comments for
further details.
"""
self.grammar.install_factory(self.factory)
stack = [[0, None]] # DFA stack
coderet = None
token = None
in_string = False
in_python = False
status = CONTINUE
lhs = None
text = None
self.line = 0
self.file = info and info.name
self.inpython = 0
value = None
while True:
stateno = stack[-1][0]
state = self.states[stateno]
# see if any lookaheads exist for the current state
if "ACTIONS" in state:
# get next token and expand any directives (ie. token is a
# list) onto the front of the token list
while token is None and tokens:
token = tokens.pop(0)
if isinstance(token, (list, tuple)):
text, self.line, token = util.unpack(token, 3)
if isinstance(token, (list, tuple)):
tokens[:0] = token + [";", ";"]
token = None # force redo
elif token == "ITEXT":
if in_python:
# don't perform interpolation in PYTHON blocks
token = "TEXT"
value = text
else:
tokens[:0] = self.interpolate_text(text, self.line)
token = None # force redo
else:
# toggle string flag to indicate if we're crossing
# a string boundary
if token == '"':
in_string = not in_string
value = tokens and tokens.pop(0) or None
if token is None:
token = ""
# get the next state for the current lookahead token
lookup = state["ACTIONS"].get(token)
if lookup:
action = lookup
else:
action = state.get("DEFAULT")
else:
# no lookahead assertions
action = state.get("DEFAULT")
# ERROR: no ACTION
if action is None:
break
# shift (positive ACTION)
if action > 0:
stack.append([action, value])
token = value = None
else:
# reduce (negative ACTION)
lhs, len_, code = self.rules[-action]
# no action implies ACCEPTance
if not action:
status = ACCEPT
# use dummy sub if code ref doesn't exist
if not code:
code = lambda *arg: len(arg) >= 2 and arg[1] or None
if len_ > 0:
codevars = [x[1] for x in stack[-len_:]]
else:
codevars = []
try:
coderet = code(self, *codevars)
except TemplateException as e:
self._parse_error(str(e), info.name)
# reduce stack by len_
if len_ > 0:
stack[-len_:] = []
# ACCEPT
if status == ACCEPT:
return coderet
elif status == ABORT:
return None
elif status == ERROR:
break
stack.append([self.states[stack[-1][0]].get("GOTOS", {}).get(lhs),
coderet])
# ERROR
if value is None:
self._parse_error("unexpected end of input", info.name)
elif value == ";":
self._parse_error("unexpected end of directive", info.name, text)
else:
self._parse_error("unexpected token (%s)" %
util.unscalar_lex(value), info.name, text)
def _parse_error(self, msg, name, text=None):
"""Method used to handle errors encountered during the parse process
in the _parse() method.
"""
line = self.line or "unknown"
if text is not None:
msg += "\n [%% %s %%]" % text
raise TemplateException("parse", "%s line %s: %s" % (name, line, msg))
def define_block(self, name, block):
"""Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally".
"""
if self.defblock is None:
return None
self.defblock[name] = block
return None
def push_defblock(self):
self.defblock_stack.append(self.defblock)
self.defblock = {}
def pop_defblock(self):
if not self.defblock_stack:
return self.defblock
block = self.defblock
self.defblock = self.defblock_stack.pop(0)
return block
def add_metadata(self, setlist):
setlist = [util.unscalar_lex(x) for x in setlist]
if self.metadata is not None:
for key, value in util.chop(setlist, 2):
self.metadata[key] = value
return None
def interpolate_text(self, text, line=0):
"""Examines text looking for any variable references embedded
like $this or like ${ this }.
"""
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = match.group(3) or match.group(4)
dir = match.group(2)
# preceding text
if pre:
line += pre.count("\n")
tokens.extend(("TEXT", pre.replace("\\$", "$")))
# variable reference
if var:
line += dir.count("\n")
tokens.append([dir, line, self.tokenise_directive(var)])
# other '$' reference - treated as text
elif dir:
line += dir.count("\n")
tokens.extend(("TEXT", dir))
return tokens
| 32.818182 | 101 | 0.58583 | [
"Artistic-2.0"
] | lmr/Template-Toolkit-Python | template/parser.py | 35,017 | Python |
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to monalisa.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| 26.547619 | 79 | 0.728251 | [
"MIT"
] | jazetjaz/monalisa | share/rpcuser/rpcuser.py | 1,115 | Python |
# Local Django
from .messagevalidator import MessageValidator
| 20.666667 | 46 | 0.854839 | [
"MIT"
] | fga-eps-mds/2017.2-Receita-Mais | medical_prescription/chat/validators/__init__.py | 62 | Python |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import cv2
import tensorflow as tf
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# In[3]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers
# ### Load the Training Data
# In[4]:
# curwd = str(os.getcwd())
# targetwd = '\\data\\train'
# path_train = curwd + targetwd
path_train = '...s\\Documents\\whale_identification\\whale_identification\\data\\train\\'
train = [os.path.join(path_train,f) for f in os.listdir(path_train) if f.endswith('.jpg')]
# In[6]:
train_labels = pd.read_csv("df_train.csv")
# In[7]:
train_labels.head()
# In[8]:
unique_whales = train_labels['Id'].unique()
len(unique_whales)
# ### Train-Validation Split
# In[9]:
def train_valid_split(df):
# find unique categories of whales in our dataframe
unique_whales = train_labels['Id'].unique()
# map the images to categories
mapping = {}
for whale in unique_whales:
lst_of_images = list(train_labels[train_labels['Id'] == whale]['Image'].values)
mapping[whale] = lst_of_images
# perform manual train/validation split to ensure balanced data in both sets (i.e. all categories are represented)
train_revised = []
valid_revised = []
for v in mapping.values():
cut = int(0.2*len(v)) # sample & 80-20 split
cut2 = int(0.25*len(v))
tr = v[:cut]
val = v[cut:cut2]
train_revised.append(tr)
valid_revised.append(val)
return train_revised, valid_revised
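# Usage sketch (assumes the 'train_labels' DataFrame loaded above; the companion
# train_valid_dict_generator() defined below maps the resulting image lists back
# to their labels):
#   train_lists, valid_lists = train_valid_split(train_labels)
#   train_map, valid_map = train_valid_dict_generator(train_lists, valid_lists, train_labels)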
def train_valid_dict_generator(train_list, valid_list, df):
# create a dictionary mapping new training set to correct labels
train_df = {}
for i in train_list:
for j in i:
lbl = df[df['Image'] == j]['Id'].values[0]
train_df[j] = lbl
# create a dictionary mapping new validation set to correct labels
valid_df = {}
for i in valid_list:
for j in i:
lbl = df[df['Image'] == j]['Id'].values[0]
valid_df[j] = lbl
    return train_df, valid_df
 | 20.082569 | 118 | 0.649612 | [
"MIT"
] | njamalova/whale_tail_identifier | train_valid_split.py | 2,189 | Python |
# -*- coding: utf-8 -*-
from odoo import fields
from odoo.tests.common import Form, SavepointCase
from odoo.tests import tagged
from contextlib import contextmanager
from unittest.mock import patch
import datetime
@tagged('post_install', '-at_install')
class AccountTestInvoicingCommon(SavepointCase):
@classmethod
def copy_account(cls, account):
suffix_nb = 1
while True:
new_code = '%s (%s)' % (account.code, suffix_nb)
if account.search_count([('company_id', '=', account.company_id.id), ('code', '=', new_code)]):
suffix_nb += 1
else:
return account.copy(default={'code': new_code})
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass()
if chart_template_ref:
chart_template = cls.env.ref(chart_template_ref)
else:
chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template', raise_if_not_found=False)
if not chart_template:
cls.tearDownClass()
# skipTest raises exception
cls.skipTest(cls, "Accounting Tests skipped because the user's company has no chart of accounts.")
# Create user.
user = cls.env['res.users'].create({
'name': 'Because I am accountman!',
'login': 'accountman',
'groups_id': [(6, 0, cls.env.user.groups_id.ids), (4, cls.env.ref('account.group_account_user').id)],
})
user.partner_id.email = '[email protected]'
# Shadow the current environment/cursor with one having the report user.
# This is mandatory to test access rights.
cls.env = cls.env(user=user)
cls.cr = cls.env.cr
cls.company_data_2 = cls.setup_company_data('company_2_data', chart_template)
cls.company_data = cls.setup_company_data('company_1_data', chart_template)
user.write({
'company_ids': [(6, 0, (cls.company_data['company'] + cls.company_data_2['company']).ids)],
'company_id': cls.company_data['company'].id,
})
cls.currency_data = cls.setup_multi_currency_data()
# ==== Taxes ====
cls.tax_sale_a = cls.company_data['default_tax_sale']
cls.tax_sale_b = cls.company_data['default_tax_sale'].copy()
cls.tax_purchase_a = cls.company_data['default_tax_purchase']
cls.tax_purchase_b = cls.company_data['default_tax_purchase'].copy()
cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)
# ==== Products ====
cls.product_a = cls.env['product.product'].create({
'name': 'product_a',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'lst_price': 1000.0,
'standard_price': 800.0,
'property_account_income_id': cls.company_data['default_account_revenue'].id,
'property_account_expense_id': cls.company_data['default_account_expense'].id,
'taxes_id': [(6, 0, cls.tax_sale_a.ids)],
'supplier_taxes_id': [(6, 0, cls.tax_purchase_a.ids)],
})
cls.product_b = cls.env['product.product'].create({
'name': 'product_b',
'uom_id': cls.env.ref('uom.product_uom_dozen').id,
'lst_price': 200.0,
'standard_price': 160.0,
'property_account_income_id': cls.copy_account(cls.company_data['default_account_revenue']).id,
'property_account_expense_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'taxes_id': [(6, 0, (cls.tax_sale_a + cls.tax_sale_b).ids)],
'supplier_taxes_id': [(6, 0, (cls.tax_purchase_a + cls.tax_purchase_b).ids)],
})
# ==== Fiscal positions ====
cls.fiscal_pos_a = cls.env['account.fiscal.position'].create({
'name': 'fiscal_pos_a',
'tax_ids': [
(0, None, {
'tax_src_id': cls.tax_sale_a.id,
'tax_dest_id': cls.tax_sale_b.id,
}),
(0, None, {
'tax_src_id': cls.tax_purchase_a.id,
'tax_dest_id': cls.tax_purchase_b.id,
}),
],
'account_ids': [
(0, None, {
'account_src_id': cls.product_a.property_account_income_id.id,
'account_dest_id': cls.product_b.property_account_income_id.id,
}),
(0, None, {
'account_src_id': cls.product_a.property_account_expense_id.id,
'account_dest_id': cls.product_b.property_account_expense_id.id,
}),
],
})
# ==== Payment terms ====
cls.pay_terms_a = cls.env.ref('account.account_payment_term_immediate')
cls.pay_terms_b = cls.env['account.payment.term'].create({
'name': '30% Advance End of Following Month',
'note': 'Payment terms: 30% Advance End of Following Month',
'line_ids': [
(0, 0, {
'value': 'percent',
'value_amount': 30.0,
'sequence': 400,
'days': 0,
'option': 'day_after_invoice_date',
}),
(0, 0, {
'value': 'balance',
'value_amount': 0.0,
'sequence': 500,
'days': 31,
'option': 'day_following_month',
}),
],
})
# ==== Partners ====
cls.partner_a = cls.env['res.partner'].create({
'name': 'partner_a',
'property_payment_term_id': cls.pay_terms_a.id,
'property_supplier_payment_term_id': cls.pay_terms_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].id,
'property_account_payable_id': cls.company_data['default_account_payable'].id,
'company_id': False,
})
cls.partner_b = cls.env['res.partner'].create({
'name': 'partner_b',
'property_payment_term_id': cls.pay_terms_b.id,
'property_supplier_payment_term_id': cls.pay_terms_b.id,
'property_account_position_id': cls.fiscal_pos_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].copy().id,
'property_account_payable_id': cls.company_data['default_account_payable'].copy().id,
'company_id': False,
})
# ==== Cash rounding ====
cls.cash_rounding_a = cls.env['account.cash.rounding'].create({
'name': 'add_invoice_line',
'rounding': 0.05,
'strategy': 'add_invoice_line',
'account_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'rounding_method': 'UP',
})
cls.cash_rounding_b = cls.env['account.cash.rounding'].create({
'name': 'biggest_tax',
'rounding': 0.05,
'strategy': 'biggest_tax',
'rounding_method': 'DOWN',
})
@classmethod
def setup_company_data(cls, company_name, chart_template, **kwargs):
''' Create a new company having the name passed as parameter.
A chart of accounts will be installed to this company: the same as the current company one.
The current user will get access to this company.
:param company_name: The name of the company.
:return: A dictionary will be returned containing all relevant accounting data for testing.
'''
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = [('company_id', '=', company.id)] + domain
account = None
if template_code:
account = cls.env['account.account'].search(domain + [('code', '=like', template_code + '%')], limit=1)
if not account:
account = cls.env['account.account'].search(domain, limit=1)
return account
currency = chart_template.currency_id
company = cls.env['res.company'].create({
'name': company_name,
'currency_id': currency.id,
**kwargs,
})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
# The currency could be different after the installation of the chart template.
company.write({'currency_id': kwargs.get('currency_id', currency.id)})
return {
'company': company,
'currency': company.currency_id,
'default_account_revenue': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)
], limit=1),
'default_account_expense': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)
], limit=1),
'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [
('user_type_id.type', '=', 'receivable')
]),
'default_account_payable': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id.type', '=', 'payable')
], limit=1),
'default_account_assets': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)
], limit=1),
'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_journal_misc': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'general')
], limit=1),
'default_journal_sale': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'sale')
], limit=1),
'default_journal_purchase': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'purchase')
], limit=1),
'default_journal_bank': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'bank')
], limit=1),
'default_journal_cash': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'cash')
], limit=1),
'default_tax_sale': company.account_sale_tax_id,
'default_tax_purchase': company.account_purchase_tax_id,
}
@classmethod
def setup_multi_currency_data(cls, default_values={}, rate2016=3.0, rate2017=2.0):
foreign_currency = cls.env['res.currency'].create({
'name': 'Gold Coin',
'symbol': '☺',
'rounding': 0.001,
'position': 'after',
'currency_unit_label': 'Gold',
'currency_subunit_label': 'Silver',
**default_values,
})
rate1 = cls.env['res.currency.rate'].create({
'name': '2016-01-01',
'rate': rate2016,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
rate2 = cls.env['res.currency.rate'].create({
'name': '2017-01-01',
'rate': rate2017,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
return {
'currency': foreign_currency,
'rates': rate1 + rate2,
}
@classmethod
def setup_armageddon_tax(cls, tax_name, company_data):
return cls.env['account.tax'].create({
'name': '%s (group)' % tax_name,
'amount_type': 'group',
'amount': 0.0,
'children_tax_ids': [
(0, 0, {
'name': '%s (child 1)' % tax_name,
'amount_type': 'percent',
'amount': 20.0,
'price_include': True,
'include_base_amount': True,
'tax_exigibility': 'on_invoice',
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
}),
(0, 0, {
'name': '%s (child 2)' % tax_name,
'amount_type': 'percent',
'amount': 10.0,
'tax_exigibility': 'on_payment',
'cash_basis_transition_account_id': company_data['default_account_tax_sale'].copy().id,
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
}),
],
})
@classmethod
def init_invoice(cls, move_type, partner=None, invoice_date=None):
move_form = Form(cls.env['account.move'].with_context(default_type=move_type))
move_form.invoice_date = invoice_date or fields.Date.from_string('2019-01-01')
move_form.partner_id = partner or cls.partner_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_a
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = cls.product_b
return move_form.save()
def assertInvoiceValues(self, move, expected_lines_values, expected_move_values):
def sort_lines(lines):
return lines.sorted(lambda line: (line.exclude_from_invoice_tab, not bool(line.tax_line_id), line.name or '', line.balance))
self.assertRecordValues(sort_lines(move.line_ids.sorted()), expected_lines_values)
self.assertRecordValues(sort_lines(move.invoice_line_ids.sorted()), expected_lines_values[:len(move.invoice_line_ids)])
self.assertRecordValues(move, [expected_move_values])
@contextmanager
def mocked_today(self, forced_today):
''' Helper to make easily a python "with statement" mocking the "today" date.
:param forced_today: The expected "today" date as a str or Date object.
:return: An object to be used like 'with self.mocked_today(<today>):'.
'''
if isinstance(forced_today, str):
forced_today_date = fields.Date.from_string(forced_today)
forced_today_datetime = fields.Datetime.from_string(forced_today)
elif isinstance(forced_today, datetime.datetime):
forced_today_datetime = forced_today
forced_today_date = forced_today_datetime.date()
else:
forced_today_date = forced_today
forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())
def today(*args, **kwargs):
return forced_today_date
with patch.object(fields.Date, 'today', today):
with patch.object(fields.Date, 'context_today', today):
with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
yield
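    # Usage sketch (from a test method on a subclass of this case class):
    #   with self.mocked_today('2019-01-01'):
    #       self.assertEqual(fields.Date.today(), fields.Date.from_string('2019-01-01'))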
class AccountingSavepointCase(AccountTestInvoicingCommon):
# Ensure the backward-compatibility before saas-13.2.
pass
| 44.178484 | 136 | 0.532293 | [
"MIT"
] | LucasBorges-Santos/docker-odoo | odoo/base-addons/account/tests/account_test_savepoint.py | 18,071 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############
## Imports ##
#############
import os
import sys ; sys.path.append("/home/developer/workspace/rklearn-lib")
import tensorflow as tf
from rklearn.tfoo_v1 import BaseModel
#################
## CIFAR10CNN ##
#################
class CIFAR10CNN(BaseModel):
################
## __init__() ##
################
def __init__(self, config, logger = None):
super().__init__(config, logger)
try:
# these parameters are sent to the trainer through the model because it is easier
self.num_epochs = self.config.cifar10_cnn["num_epochs"]
self.learning_rate = self.config.cifar10_cnn["learning_rate"]
self.max_to_keep = self.config.cifar10_cnn["max_to_keep"]
self.checkpoint_dir = self.config.cifar10_cnn["checkpoint_dir"]
self.model_dir = self.config.cifar10_cnn["model_dir"]
os.makedirs(self.checkpoint_dir, exist_ok = True)
os.makedirs(self.model_dir, exist_ok = True)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logger.error("error msg = {}, error type = {}, error file = {}, error line = {}".format(e, exc_type, fname, exc_tb.tb_lineno))
raise RuntimeError("Error in CIFAR10CNN construction regarding the checkpoints and model directories!")
###################
## build_model() ##
###################
def build_model(self):
"""
Build the custom CNN for the CIFAR-10 dataset.
"""
# The input data holders (cf. shapes after prepa)
self.X = tf.compat.v1.placeholder(tf.float32, shape = (None,
self.config.data["image_size"],
self.config.data["image_size"],
self.config.data["num_channels"]), name="X") # ex. (50000, 32, 32, 3)
self.y = tf.compat.v1.placeholder(tf.int32, shape = (None, self.config.data["num_categories"]), name="y") # ex. (50000, 10)
self.train = tf.compat.v1.placeholder(tf.bool)
# The CNN architecture = conv/poo layers + flatten layer + connected layers
with tf.name_scope("cnn"):
# a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
self.conv1 = tf.layers.conv2d(self.X,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
self.conv2 = tf.layers.conv2d(self.pool1,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)
self.conv3 = tf.layers.conv2d(self.pool2,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
self.conv4 = tf.layers.conv2d(self.pool3,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train)
# b. Flatten input data
self.flatten = tf.reshape(self.drop3, [-1, self.config.cifar10_cnn["fc1_nb_units"]])
# Create connected layers: fc1, fc2
with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={"is_training": self.train}):
self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)
# Compute loss
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
# Optimizer
with tf.name_scope("training_op"):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Perf metrics
with tf.name_scope("accuracy"):
prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
| 47.652542 | 138 | 0.538858 | [
"MIT"
] | rejux/rklearn-lib | rklearn/tests/it/cifar10_cnn.py | 5,623 | Python |
from os import environ, unsetenv
TESTING_DB_FLAG = 'tethys-testing_'
def set_testing_environment(val):
if val:
environ['TETHYS_TESTING_IN_PROGRESS'] = 'true'
else:
environ['TETHYS_TESTING_IN_PROGRESS'] = ''
del environ['TETHYS_TESTING_IN_PROGRESS']
unsetenv('TETHYS_TESTING_IN_PROGRESS')
def get_test_db_name(orig_name):
if TESTING_DB_FLAG not in orig_name:
test_db_name = '{0}{1}'.format(TESTING_DB_FLAG, orig_name)
else:
test_db_name = orig_name
return test_db_name
def is_testing_environment():
return environ.get('TETHYS_TESTING_IN_PROGRESS')
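# Minimal usage sketch (function names are from this module; the database name
# is illustrative):
#   set_testing_environment(True)
#   assert is_testing_environment()
#   get_test_db_name('tethys_default')   # -> 'tethys-testing_tethys_default'
#   set_testing_environment(False)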
| 24.115385 | 66 | 0.712919 | [
"BSD-2-Clause"
] | quyendong/tethys | tethys_apps/base/testing/environment.py | 627 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/25 0025 10:14 AM
# @Author : Exchris Tsai
# @Site :
# @File : example52.py
# @Software: PyCharm
"""
Exercise: learn to use the bitwise OR operator |.
Program analysis: 0|0=0; 0|1=1; 1|0=1; 1|1=1
"""
__author__ = 'Exchris Tsai'
if __name__ == '__main__':
a = 0o77
b = a | 3
print('a | b is %d' %b)
b |= 7
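    # Worked values: a = 0o77 = 0b111111 = 63, so b = 63 | 3 = 63 and b |= 7 leaves 63.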
    print('a | b is %d' %b)
 | 17.904762 | 36 | 0.513298 | [
"MIT"
] | exchris/Pythonlearn | Old/exercise/example52.py | 412 | Python |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
import math
from PyQt5.QtCore import pyqtSignal, QSize, Qt, QTimer
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QAction, QApplication, QGridLayout, QLabel,
QLineEdit, QMainWindow, QMessageBox, QOpenGLWidget, QScrollArea,
QSizePolicy, QSlider, QWidget)
class GLWidget(QOpenGLWidget):
xRotationChanged = pyqtSignal(int)
yRotationChanged = pyqtSignal(int)
zRotationChanged = pyqtSignal(int)
def __init__(self, parent=None):
super(GLWidget, self).__init__(parent)
self.gear1 = 0
self.gear2 = 0
self.gear3 = 0
self.xRot = 0
self.yRot = 0
self.zRot = 0
self.gear1Rot = 0
timer = QTimer(self)
timer.timeout.connect(self.advanceGears)
timer.start(20)
def setXRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
self.xRotationChanged.emit(angle)
self.update()
def setYRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
self.yRotationChanged.emit(angle)
self.update()
def setZRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
self.zRotationChanged.emit(angle)
self.update()
def initializeGL(self):
self.gl = self.context().versionFunctions()
self.gl.initializeOpenGLFunctions()
lightPos = (5.0, 5.0, 10.0, 1.0)
reflectance1 = (0.8, 0.1, 0.0, 1.0)
reflectance2 = (0.0, 0.8, 0.2, 1.0)
reflectance3 = (0.2, 0.2, 1.0, 1.0)
self.gl.glLightfv(self.gl.GL_LIGHT0, self.gl.GL_POSITION, lightPos)
self.gl.glEnable(self.gl.GL_LIGHTING)
self.gl.glEnable(self.gl.GL_LIGHT0)
self.gl.glEnable(self.gl.GL_DEPTH_TEST)
self.gear1 = self.makeGear(reflectance1, 1.0, 4.0, 1.0, 0.7, 20)
self.gear2 = self.makeGear(reflectance2, 0.5, 2.0, 2.0, 0.7, 10)
self.gear3 = self.makeGear(reflectance3, 1.3, 2.0, 0.5, 0.7, 10)
self.gl.glEnable(self.gl.GL_NORMALIZE)
self.gl.glClearColor(0.0, 0.0, 0.0, 1.0)
def paintGL(self):
self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT | self.gl.GL_DEPTH_BUFFER_BIT)
self.gl.glPushMatrix()
self.gl.glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
self.gl.glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
self.gl.glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
self.drawGear(self.gear1, -3.0, -2.0, 0.0, self.gear1Rot / 16.0)
self.drawGear(self.gear2, +3.1, -2.0, 0.0,
-2.0 * (self.gear1Rot / 16.0) - 9.0)
self.gl.glRotated(+90.0, 1.0, 0.0, 0.0)
self.drawGear(self.gear3, -3.1, -1.8, -2.2,
+2.0 * (self.gear1Rot / 16.0) - 2.0)
self.gl.glPopMatrix()
def resizeGL(self, width, height):
side = min(width, height)
if side < 0:
return
self.gl.glViewport((width - side) // 2, (height - side) // 2, side, side)
self.gl.glMatrixMode(self.gl.GL_PROJECTION)
self.gl.glLoadIdentity()
self.gl.glFrustum(-1.0, +1.0, -1.0, 1.0, 5.0, 60.0)
self.gl.glMatrixMode(self.gl.GL_MODELVIEW)
self.gl.glLoadIdentity()
self.gl.glTranslated(0.0, 0.0, -40.0)
def mousePressEvent(self, event):
self.lastPos = event.pos()
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
if event.buttons() & Qt.LeftButton:
self.setXRotation(self.xRot + 8 * dy)
self.setYRotation(self.yRot + 8 * dx)
elif event.buttons() & Qt.RightButton:
self.setXRotation(self.xRot + 8 * dy)
self.setZRotation(self.zRot + 8 * dx)
self.lastPos = event.pos()
def advanceGears(self):
self.gear1Rot += 2 * 16
self.update()
def xRotation(self):
return self.xRot
def yRotation(self):
return self.yRot
def zRotation(self):
return self.zRot
def makeGear(self, reflectance, innerRadius, outerRadius, thickness, toothSize, toothCount):
list = self.gl.glGenLists(1)
self.gl.glNewList(list, self.gl.GL_COMPILE)
self.gl.glMaterialfv(self.gl.GL_FRONT, self.gl.GL_AMBIENT_AND_DIFFUSE,
reflectance)
r0 = innerRadius
r1 = outerRadius - toothSize / 2.0
r2 = outerRadius + toothSize / 2.0
delta = (2.0 * math.pi / toothCount) / 4.0
z = thickness / 2.0
self.gl.glShadeModel(self.gl.GL_FLAT)
for i in range(2):
if i == 0:
sign = +1.0
else:
sign = -1.0
self.gl.glNormal3d(0.0, 0.0, sign)
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for j in range(toothCount+1):
angle = 2.0 * math.pi * j / toothCount
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
self.gl.glEnd()
self.gl.glBegin(self.gl.GL_QUADS)
for j in range(toothCount):
angle = 2.0 * math.pi * j / toothCount
self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
self.gl.glVertex3d(r2 * math.cos(angle + delta), r2 * math.sin(angle + delta), sign * z)
self.gl.glVertex3d(r2 * math.cos(angle + 2 * delta), r2 * math.sin(angle + 2 * delta), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
self.gl.glEnd()
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for i in range(toothCount):
for j in range(2):
angle = 2.0 * math.pi * (i + (j / 2.0)) / toothCount
s1 = r1
s2 = r2
if j == 1:
s1, s2 = s2, s1
self.gl.glNormal3d(math.cos(angle), math.sin(angle), 0.0)
self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), +z)
self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), -z)
self.gl.glNormal3d(s2 * math.sin(angle + delta) - s1 * math.sin(angle), s1 * math.cos(angle) - s2 * math.cos(angle + delta), 0.0)
self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), +z)
self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), -z)
self.gl.glVertex3d(r1, 0.0, +z)
self.gl.glVertex3d(r1, 0.0, -z)
self.gl.glEnd()
self.gl.glShadeModel(self.gl.GL_SMOOTH)
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for i in range(toothCount+1):
angle = i * 2.0 * math.pi / toothCount
self.gl.glNormal3d(-math.cos(angle), -math.sin(angle), 0.0)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), +z)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), -z)
self.gl.glEnd()
self.gl.glEndList()
return list
def drawGear(self, gear, dx, dy, dz, angle):
self.gl.glPushMatrix()
self.gl.glTranslated(dx, dy, dz)
self.gl.glRotated(angle, 0.0, 0.0, 1.0)
self.gl.glCallList(gear)
self.gl.glPopMatrix()
    def normalizeAngle(self, angle):
        # Wrap the angle into the 0..360*16 range used by the rotation sliders
        # and return it; the rotation setters assign the result.
        while (angle < 0):
            angle += 360 * 16
        while (angle > 360 * 16):
            angle -= 360 * 16
        return angle
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
centralWidget = QWidget()
self.setCentralWidget(centralWidget)
self.glWidget = GLWidget()
self.pixmapLabel = QLabel()
self.glWidgetArea = QScrollArea()
self.glWidgetArea.setWidget(self.glWidget)
self.glWidgetArea.setWidgetResizable(True)
self.glWidgetArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Ignored)
self.glWidgetArea.setMinimumSize(50, 50)
self.pixmapLabelArea = QScrollArea()
self.pixmapLabelArea.setWidget(self.pixmapLabel)
self.pixmapLabelArea.setSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Ignored)
self.pixmapLabelArea.setMinimumSize(50, 50)
xSlider = self.createSlider(self.glWidget.xRotationChanged,
self.glWidget.setXRotation)
ySlider = self.createSlider(self.glWidget.yRotationChanged,
self.glWidget.setYRotation)
zSlider = self.createSlider(self.glWidget.zRotationChanged,
self.glWidget.setZRotation)
self.createActions()
self.createMenus()
centralLayout = QGridLayout()
centralLayout.addWidget(self.glWidgetArea, 0, 0)
centralLayout.addWidget(self.pixmapLabelArea, 0, 1)
centralLayout.addWidget(xSlider, 1, 0, 1, 2)
centralLayout.addWidget(ySlider, 2, 0, 1, 2)
centralLayout.addWidget(zSlider, 3, 0, 1, 2)
centralWidget.setLayout(centralLayout)
xSlider.setValue(15 * 16)
ySlider.setValue(345 * 16)
zSlider.setValue(0 * 16)
self.setWindowTitle("Grabber")
self.resize(400, 300)
def grabFrameBuffer(self):
image = self.glWidget.grabFramebuffer()
self.setPixmap(QPixmap.fromImage(image))
def clearPixmap(self):
self.setPixmap(QPixmap())
def about(self):
QMessageBox.about(self, "About Grabber",
"The <b>Grabber</b> example demonstrates two approaches for "
"rendering OpenGL into a Qt pixmap.")
def createActions(self):
self.grabFrameBufferAct = QAction("&Grab Frame Buffer", self,
shortcut="Ctrl+G", triggered=self.grabFrameBuffer)
self.clearPixmapAct = QAction("&Clear Pixmap", self,
shortcut="Ctrl+L", triggered=self.clearPixmap)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.aboutAct = QAction("&About", self, triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
triggered=QApplication.instance().aboutQt)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.grabFrameBufferAct)
self.fileMenu.addAction(self.clearPixmapAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createSlider(self, changedSignal, setterSlot):
slider = QSlider(Qt.Horizontal)
slider.setRange(0, 360 * 16)
slider.setSingleStep(16)
slider.setPageStep(15 * 16)
slider.setTickInterval(15 * 16)
slider.setTickPosition(QSlider.TicksRight)
slider.valueChanged.connect(setterSlot)
changedSignal.connect(slider.setValue)
return slider
def setPixmap(self, pixmap):
self.pixmapLabel.setPixmap(pixmap)
size = pixmap.size()
if size - QSize(1, 0) == self.pixmapLabelArea.maximumViewportSize():
size -= QSize(1, 0)
self.pixmapLabel.resize(size)
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec_())
| 35.522727 | 145 | 0.603967 | [
"MIT"
] | ArjandeV/iracing-overlay | PyQt5_gpl-5.8/examples/opengl/grabber.py | 14,067 | Python |
"""
Data processing routines
Deepak Baby, UGent, June 2018
[email protected]
"""
import numpy as np
def reconstruct_wav(wavmat, stride_factor=0.5):
"""
Reconstructs the audiofile from sliced matrix wavmat
"""
window_length = wavmat.shape[1]
window_stride = int(stride_factor * window_length)
  wav_length = (wavmat.shape[0] - 1) * window_stride + window_length
wav_recon = np.zeros((1,wav_length))
#print ("wav recon shape " + str(wav_recon.shape))
for k in range (wavmat.shape[0]):
wav_beg = k * window_stride
wav_end = wav_beg + window_length
wav_recon[0, wav_beg:wav_end] += wavmat[k, :]
# now compute the scaling factor for multiple instances
noverlap = int(np.ceil(1/stride_factor))
scale_ = (1/float(noverlap)) * np.ones((1, wav_length))
for s in range(noverlap-1):
s_beg = s * window_stride
s_end = s_beg + window_stride
scale_[0, s_beg:s_end] = 1/ (s+1)
scale_[0, -s_beg - 1 : -s_end:-1] = 1/ (s+1)
return wav_recon * scale_
def pre_emph(x, coeff=0.95):
"""
Apply pre_emph on 2d data (batch_size x window_length)
"""
#print ("x shape: " + str(x.shape))
x0 = x[:, 0]
x0 = np.expand_dims(x0, axis=1)
diff = x[:, 1:] - coeff * x[:, :-1]
x_preemph = np.concatenate((x0, diff), axis=1)
if not x.shape == x_preemph.shape:
print ("ERROR: Pre-emphasis is wrong")
#print ("x_preemph shape: " + str(x_preemph.shape))
return x_preemph
def de_emph(y, coeff=0.95):
"""
Apply de_emphasis on test data: works only on 1d data
"""
if coeff <= 0:
return y
x = np.zeros((y.shape[0],), dtype=np.float32)
#print("in_shape" + str(y.shape))
x[0] = y[0]
for n in range(1, y.shape[0], 1):
x[n] = coeff * x[n - 1] + y[n]
return x
def data_preprocess(wav, preemph=0.95):
wav = (2./65535.) * (wav.astype('float32') - 32767) + 1.
if preemph > 0:
wav = pre_emph(wav, coeff=preemph)
return wav.astype('float32')
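# Illustrative sketch, not part of the original module: a quick round-trip
# check for the pre-emphasis / de-emphasis pair above. The window length and
# the 0.95 coefficient are assumptions chosen only for this demo.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  window = rng.rand(1, 1024).astype('float32')   # one window, batch size 1
  emphasized = pre_emph(window, coeff=0.95)      # pre_emph expects 2-D input
  restored = de_emph(emphasized[0], coeff=0.95)  # de_emph undoes it on 1-D data
  print("max round-trip error:", np.max(np.abs(restored - window[0])))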
| 29.075758 | 68 | 0.642522 | [
"MIT"
] | deepakbaby/isegan | data_ops.py | 1,919 | Python |
errno_map = {
"1": {
"comment": "Operation not permitted",
"name": "EPERM"
},
"2": {
"comment": "No such file or directory",
"name": "ENOENT"
},
"3": {
"comment": "No such process",
"name": "ESRCH"
},
"4": {
"comment": "Interrupted system call",
"name": "EINTR"
},
"5": {
"comment": "I/O error",
"name": "EIO"
},
"6": {
"comment": "No such device or address",
"name": "ENXIO"
},
"7": {
"comment": "Argument list too long",
"name": "E2BIG"
},
"8": {
"comment": "Exec format error",
"name": "ENOEXEC"
},
"9": {
"comment": "Bad file number",
"name": "EBADF"
},
"10": {
"comment": "No child processes",
"name": "ECHILD"
},
"11": {
"comment": "Try again",
"name": "EAGAIN"
},
"12": {
"comment": "Out of memory",
"name": "ENOMEM"
},
"13": {
"comment": "Permission denied",
"name": "EACCES"
},
"14": {
"comment": "Bad address",
"name": "EFAULT"
},
"15": {
"comment": "Block device required",
"name": "ENOTBLK"
},
"16": {
"comment": "Device or resource busy",
"name": "EBUSY"
},
"17": {
"comment": "File exists",
"name": "EEXIST"
},
"18": {
"comment": "Cross-device link",
"name": "EXDEV"
},
"19": {
"comment": "No such device",
"name": "ENODEV"
},
"20": {
"comment": "Not a directory",
"name": "ENOTDIR"
},
"21": {
"comment": "Is a directory",
"name": "EISDIR"
},
"22": {
"comment": "Invalid argument",
"name": "EINVAL"
},
"23": {
"comment": "File table overflow",
"name": "ENFILE"
},
"24": {
"comment": "Too many open files",
"name": "EMFILE"
},
"25": {
"comment": "Not a typewriter",
"name": "ENOTTY"
},
"26": {
"comment": "Text file busy",
"name": "ETXTBSY"
},
"27": {
"comment": "File too large",
"name": "EFBIG"
},
"28": {
"comment": "No space left on device",
"name": "ENOSPC"
},
"29": {
"comment": "Illegal seek",
"name": "ESPIPE"
},
"30": {
"comment": "Read-only file system",
"name": "EROFS"
},
"31": {
"comment": "Too many links",
"name": "EMLINK"
},
"32": {
"comment": "Broken pipe",
"name": "EPIPE"
},
"33": {
"comment": "Math argument out of domain of func",
"name": "EDOM"
},
"34": {
"comment": "Math result not representable",
"name": "ERANGE"
},
"35": {
"comment": "Resource deadlock would occur",
"name": "EDEADLK"
},
"36": {
"comment": "File name too long",
"name": "ENAMETOOLONG"
},
"37": {
"comment": "No record locks available",
"name": "ENOLCK"
},
"38": {
"comment": "Function not implemented",
"name": "ENOSYS"
},
"39": {
"comment": "Directory not empty",
"name": "ENOTEMPTY"
},
"40": {
"comment": "Too many symbolic links encountered",
"name": "ELOOP"
},
"42": {
"comment": "No message of desired type",
"name": "ENOMSG"
},
"43": {
"comment": "Identifier removed",
"name": "EIDRM"
},
"44": {
"comment": "Channel number out of range",
"name": "ECHRNG"
},
"45": {
"comment": "Level 2 not synchronized",
"name": "EL2NSYNC"
},
"46": {
"comment": "Level 3 halted",
"name": "EL3HLT"
},
"47": {
"comment": "Level 3 reset",
"name": "EL3RST"
},
"48": {
"comment": "Link number out of range",
"name": "ELNRNG"
},
"49": {
"comment": "Protocol driver not attached",
"name": "EUNATCH"
},
"50": {
"comment": "No CSI structure available",
"name": "ENOCSI"
},
"51": {
"comment": "Level 2 halted",
"name": "EL2HLT"
},
"52": {
"comment": "Invalid exchange",
"name": "EBADE"
},
"53": {
"comment": "Invalid request descriptor",
"name": "EBADR"
},
"54": {
"comment": "Exchange full",
"name": "EXFULL"
},
"55": {
"comment": "No anode",
"name": "ENOANO"
},
"56": {
"comment": "Invalid request code",
"name": "EBADRQC"
},
"57": {
"comment": "Invalid slot",
"name": "EBADSLT"
},
"59": {
"comment": "Bad font file format",
"name": "EBFONT"
},
"60": {
"comment": "Device not a stream",
"name": "ENOSTR"
},
"61": {
"comment": "No data available",
"name": "ENODATA"
},
"62": {
"comment": "Timer expired",
"name": "ETIME"
},
"63": {
"comment": "Out of streams resources",
"name": "ENOSR"
},
"64": {
"comment": "Machine is not on the network",
"name": "ENONET"
},
"65": {
"comment": "Package not installed",
"name": "ENOPKG"
},
"66": {
"comment": "Object is remote",
"name": "EREMOTE"
},
"67": {
"comment": "Link has been severed",
"name": "ENOLINK"
},
"68": {
"comment": "Advertise error",
"name": "EADV"
},
"69": {
"comment": "Srmount error",
"name": "ESRMNT"
},
"70": {
"comment": "Communication error on send",
"name": "ECOMM"
},
"71": {
"comment": "Protocol error",
"name": "EPROTO"
},
"72": {
"comment": "Multihop attempted",
"name": "EMULTIHOP"
},
"73": {
"comment": "RFS specific error",
"name": "EDOTDOT"
},
"74": {
"comment": "Not a data message",
"name": "EBADMSG"
},
"75": {
"comment": "Value too large for defined data type",
"name": "EOVERFLOW"
},
"76": {
"comment": "Name not unique on network",
"name": "ENOTUNIQ"
},
"77": {
"comment": "File descriptor in bad state",
"name": "EBADFD"
},
"78": {
"comment": "Remote address changed",
"name": "EREMCHG"
},
"79": {
"comment": "Can not access a needed shared library",
"name": "ELIBACC"
},
"80": {
"comment": "Accessing a corrupted shared library",
"name": "ELIBBAD"
},
"81": {
"comment": ".lib section in a.out corrupted",
"name": "ELIBSCN"
},
"82": {
"comment": "Attempting to link in too many shared libraries",
"name": "ELIBMAX"
},
"83": {
"comment": "Cannot exec a shared library directly",
"name": "ELIBEXEC"
},
"84": {
"comment": "Illegal byte sequence",
"name": "EILSEQ"
},
"85": {
"comment": "Interrupted system call should be restarted",
"name": "ERESTART"
},
"86": {
"comment": "Streams pipe error",
"name": "ESTRPIPE"
},
"87": {
"comment": "Too many users",
"name": "EUSERS"
},
"88": {
"comment": "Socket operation on non-socket",
"name": "ENOTSOCK"
},
"89": {
"comment": "Destination address required",
"name": "EDESTADDRREQ"
},
"90": {
"comment": "Message too long",
"name": "EMSGSIZE"
},
"91": {
"comment": "Protocol wrong type for socket",
"name": "EPROTOTYPE"
},
"92": {
"comment": "Protocol not available",
"name": "ENOPROTOOPT"
},
"93": {
"comment": "Protocol not supported",
"name": "EPROTONOSUPBOARD"
},
"94": {
"comment": "Socket type not supported",
"name": "ESOCKTNOSUPBOARD"
},
"95": {
"comment": "Operation not supported on transport endpoint",
"name": "EOPNOTSUPP"
},
"96": {
"comment": "Protocol family not supported",
"name": "EPFNOSUPBOARD"
},
"97": {
"comment": "Address family not supported by protocol",
"name": "EAFNOSUPBOARD"
},
"98": {
"comment": "Address already in use",
"name": "EADDRINUSE"
},
"99": {
"comment": "Cannot assign requested address",
"name": "EADDRNOTAVAIL"
},
"100": {
"comment": "Network is down",
"name": "ENETDOWN"
},
"101": {
"comment": "Network is unreachable",
"name": "ENETUNREACH"
},
"102": {
"comment": "Network dropped connection because of reset",
"name": "ENETRESET"
},
"103": {
"comment": "Software caused connection abort",
"name": "ECONNABORTED"
},
"104": {
"comment": "Connection reset by peer",
"name": "ECONNRESET"
},
"105": {
"comment": "No buffer space available",
"name": "ENOBUFS"
},
"106": {
"comment": "Transport endpoint is already connected",
"name": "EISCONN"
},
"107": {
"comment": "Transport endpoint is not connected",
"name": "ENOTCONN"
},
"108": {
"comment": "Cannot send after transport endpoint shutdown",
"name": "ESHUTDOWN"
},
"109": {
"comment": "Too many references: cannot splice",
"name": "ETOOMANYREFS"
},
"110": {
"comment": "Connection timed out",
"name": "ETIMEDOUT"
},
"111": {
"comment": "Connection refused",
"name": "ECONNREFUSED"
},
"112": {
"comment": "Host is down",
"name": "EHOSTDOWN"
},
"113": {
"comment": "No route to host",
"name": "EHOSTUNREACH"
},
"114": {
"comment": "Operation already in progress",
"name": "EALREADY"
},
"115": {
"comment": "Operation now in progress",
"name": "EINPROGRESS"
},
"116": {
"comment": "Stale NFS file handle",
"name": "ESTALE"
},
"117": {
"comment": "Structure needs cleaning",
"name": "EUCLEAN"
},
"118": {
"comment": "Not a XENIX named type file",
"name": "ENOTNAM"
},
"119": {
"comment": "No XENIX sems available",
"name": "ENAVAIL"
},
"120": {
"comment": "Is a named type file",
"name": "EISNAM"
},
"121": {
"comment": "Remote I/O error",
"name": "EREMOTEIO"
},
"122": {
"comment": "Quota exceeded",
"name": "EDQUOT"
},
"123": {
"comment": "No medium found",
"name": "ENOMEDIUM"
},
"124": {
"comment": "Wrong medium type",
"name": "EMEDIUMTYPE"
},
"125": {
"comment": "Operation Canceled",
"name": "ECANCELED"
},
"126": {
"comment": "Required key not available",
"name": "ENOKEY"
},
"127": {
"comment": "Key has expired",
"name": "EKEYEXPIRED"
},
"128": {
"comment": "Key has been revoked",
"name": "EKEYREVOKED"
},
"129": {
"comment": "Key was rejected by service",
"name": "EKEYREJECTED"
},
"1000": {
"comment": "Stack corrupt.",
"name": "ESTACK"
},
"1001": {
"comment": "Watchdog timeout.",
"name": "EWATCHDOGTIMEOUT"
}
} | 23.11583 | 70 | 0.431769 | [
"MIT"
] | Robotonics/simba | make/simbaerrno.py | 11,974 | Python |
from django.urls import include, path
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = [
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
]
urlpatterns += [
path('', RedirectView.as_view(url='/polls/')),
] | 23.833333 | 50 | 0.695804 | [
"MIT"
] | feraco/shifting-morals | mysite/urls.py | 286 | Python |
from django.views.generic import TemplateView, CreateView, UpdateView
from django.urls import reverse_lazy
from home_app import forms
from django.contrib.auth.mixins import LoginRequiredMixin
from account_app.models import CustomUser
# Create your views here.
class IndexView(TemplateView):
template_name = 'home_app/index.html'
class ProfileView(LoginRequiredMixin, TemplateView):
template_name = 'home_app/profile.html'
class RegistrationView(CreateView):
form_class = forms.UserCreateForm
success_url = reverse_lazy('home-app:index')
template_name = 'registration/registration.html'
class UserUpdateView(UpdateView):
form_class = forms.UserUpdateForm
success_url = reverse_lazy('home-app:profile')
template_name = 'registration/registration_form.html'
model = CustomUser
class Page403View(TemplateView):
template_name = 'home_app/403.html'
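# Illustrative sketch, not part of the original file: URL patterns these views
# could be wired to. The 'home-app' namespace and the 'index'/'profile' names
# follow the reverse_lazy() targets above; the remaining routes and names are
# assumptions for the example only.
#
#   # home_app/urls.py
#   from django.urls import path
#   from home_app import views
#
#   app_name = 'home-app'
#   urlpatterns = [
#       path('', views.IndexView.as_view(), name='index'),
#       path('profile/', views.ProfileView.as_view(), name='profile'),
#       path('register/', views.RegistrationView.as_view(), name='registration'),
#       path('profile/<int:pk>/edit/', views.UserUpdateView.as_view(), name='update'),
#   ]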
| 27.090909 | 69 | 0.787472 | [
"MIT"
] | xjati46/agoraschool | home_app/views.py | 894 | Python |
from collections.abc import Iterable
from torch import nn
from torch import optim
from torchero import meters
from functools import partial
INVALID_MODE_INFERENCE_MESSAGE = (
"Could not infer mode from meter {meter}"
)
def get_default_mode(meter):
if hasattr(meter.__class__, 'DEFAULT_MODE'):
return getattr(meter.__class__, 'DEFAULT_MODE')
else:
raise Exception(INVALID_MODE_INFERENCE_MESSAGE
.format(meter=getattr(meter, 'name', meter.__class__.__name__)))
optimizers = {
'asgd': optim.ASGD,
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamax': optim.Adamax,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': lambda params: optim.SGD(params, lr=1e-2),
'sparseadam': optim.SparseAdam
}
def get_optimizer_by_name(name, model):
if name not in optimizers:
raise KeyError("Optimizer {} not found. "
"Optimizer availables: {}"
.format(repr(name),
', '.join(map(repr, optimizers.keys()))))
return optimizers[name](model.parameters())
losses = {
'l1': nn.L1Loss,
'mse': nn.MSELoss,
'cross_entropy': nn.CrossEntropyLoss,
'nll': nn.NLLLoss,
'poisson_nll': nn.PoissonNLLLoss,
'kl_div': nn.KLDivLoss,
'binary_cross_entropy': nn.BCELoss,
'binary_cross_entropy_wl': nn.BCEWithLogitsLoss,
'margin_ranking': nn.MarginRankingLoss,
'hinge': nn.HingeEmbeddingLoss,
'multi_label_hinge': nn.MultiLabelMarginLoss,
'smooth': nn.SmoothL1Loss,
'soft_margin': nn.SoftMarginLoss,
'multilabel_soft_margin': nn.MultiLabelSoftMarginLoss,
'cosine': nn.CosineEmbeddingLoss,
'multi_hinge': nn.MultiMarginLoss,
'triplet_margin': nn.TripletMarginLoss
}
def get_loss_by_name(name):
if name not in losses:
raise KeyError("Loss {} not found. Losses available: {}"
.format(repr(name),
', '.join(map(repr, losses.keys()))))
return losses[name]()
meters_by_name = {
'mse': meters.MSE,
'rmse': meters.RMSE,
'msle': meters.MSLE,
'rmsle': meters.RMSLE,
'categorical_accuracy': meters.CategoricalAccuracy,
'categorical_accuracy_percentage': lambda: meters.CategoricalAccuracy() * 100.0,
'binary_accuracy': meters.BinaryAccuracy,
'binary_accuracy_percentage': lambda: meters.BinaryAccuracy() * 100,
'binary_accuracy_wl': meters.BinaryWithLogitsAccuracy,
'binary_accuracy_wl_percentage': lambda: meters.BinaryWithLogitsAccuracy() * 100,
'confusion_matrix': meters.ConfusionMatrix,
'confusion_matrix_percentage': lambda: meters.ConfusionMatrix() * 100,
'balanced_accuracy': meters.BalancedAccuracy,
}
for name, metric in (('recall', meters.Recall),
('precision', meters.Precision),
('npv', meters.NPV),
('specificity', meters.Specificity),
('f1', meters.F1Score),
('f2', meters.F2Score)):
meters_by_name.update({
name: metric,
name + '_wl': partial(metric, with_logits=True)
})
for agg_name in ('micro', 'macro', 'weighted'):
meters_by_name.update({
agg_name + '_' + name: partial(metric, with_logits=False, agg=agg_name),
agg_name + '_' + name + '_wl': partial(metric, with_logits=True, agg=agg_name)
})
for name, speed_metric, pace_metric in (('batches', meters.BatchSpeed, meters.BatchPace),
('it', meters.IterSpeed, meters.IterPace)):
for unit_abbr, unit in (('sec', 'second'),
('min', 'minute')):
meters_by_name.update({name + '/' + unit_abbr: partial(speed_metric, time_unit=unit),
unit_abbr + '/' + name.replace('batches', 'batch'): partial(pace_metric, time_unit=unit)})
def get_meters_by_name(name):
if name not in meters_by_name:
raise KeyError("Meter {} not found. Meters available: {}"
.format(repr(name),
', '.join(map(repr, meters_by_name.keys()))))
return meters_by_name[name]()
def parse_meters(meters):
def to_small_case(obj):
if hasattr(obj, 'name'):
s = str(obj.name)
else:
name = obj.__class__.__name__
s = ''
for i in range(len(name)-1):
s += name[i].lower()
if name[i].islower() and not name[i+1].islower():
s += '_'
s += name[-1].lower()
return s
def parse(obj):
if isinstance(obj, str):
return get_meters_by_name(obj)
else:
return obj
def parse_name(obj):
if isinstance(obj, str):
obj = get_meters_by_name(obj)
return to_small_case(obj)
if isinstance(meters, dict):
return {k: parse(v) for k, v in meters.items()}
elif isinstance(meters, Iterable):
return {parse_name(v): parse(v) for v in meters}
else:
raise Exception("Expected iterable meters")
time_units = {'hour': 60*60,
'hours': 60*60,
'minute': 60,
'minutes': 60,
'second': 1,
'seconds': 1}
def parse_time_unit(time_unit):
if isinstance(time_unit, (int, float)):
return time_unit
elif isinstance(time_unit, str) and time_unit in time_units:
return time_units[time_unit]
elif isinstance(time_unit, str):
raise ValueError("Invalid time_unit reference!")
else:
raise TypeError("Invalid type for time_unit")
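# Illustrative usage sketch, not part of the original module. It only exercises
# the lookup helpers defined above; the metric and loss names come from the
# registries in this file.
if __name__ == "__main__":
    print(parse_time_unit('minutes'))       # -> 60
    print(get_loss_by_name('mse'))          # -> MSELoss()
    print(sorted(meters_by_name)[:5])       # first few registered metric names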
| 33.641176 | 121 | 0.600979 | [
"MIT"
] | juancruzsosa/torchero | torchero/utils/defaults.py | 5,719 | Python |
# Generated by Django 3.2 on 2021-09-07 12:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('payment', '0002_alter_invoice_address'),
('item', '0002_alter_item_upc'),
('accounts', '0002_auto_20210831_0046'),
('service', '0008_service_available'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='created time')),
('modified_time', models.DateTimeField(auto_now=True, verbose_name='modified time')),
('is_paid', models.BooleanField(default=False, verbose_name='is paid')),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='carts', to='accounts.customer', verbose_name='customer')),
('invoice', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='cart', to='payment.invoice', verbose_name='invoice')),
('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='carts', to='service.service', verbose_name='service')),
],
options={
'verbose_name': 'Cart',
'verbose_name_plural': 'Carts',
'db_table': 'cart',
},
),
migrations.CreateModel(
name='CartLine',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='created time')),
('modified_time', models.DateTimeField(auto_now=True, verbose_name='modified time')),
('quantity', models.PositiveIntegerField(default=1, verbose_name='quantity')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines', to='cart.cart', verbose_name='cart')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='lines', to='item.item', verbose_name='item')),
],
options={
'verbose_name': 'Cart line',
'verbose_name_plural': 'Cart lines',
'db_table': 'cart_line',
'ordering': ('created_time', 'modified_time'),
'unique_together': {('item', 'cart')},
},
),
] | 51.222222 | 187 | 0.606652 | [
"MIT"
] | jamedadi/yummy | cart/migrations/0001_initial.py | 2,766 | Python |
#!/usr/bin/env python
from __future__ import print_function, absolute_import
import re
import subprocess
import os
import time
import argparse
import sys
class SmartDevice(object):
smartcmdfmt = ['sudo', 'smartctl', '-f', 'brief', '-A', '/dev/{dev}']
def __init__(self, dev):
self.dev = dev
self.attrcmd = [x.format(dev=dev) for x in self.smartcmdfmt]
def attributes(self):
try:
            # universal_newlines=True yields str output (Python 3 returns bytes otherwise)
            out = subprocess.check_output(self.attrcmd, universal_newlines=True)
except (OSError, subprocess.CalledProcessError) as err:
print('Error running command: {0}'.format(err), file=sys.stderr)
return
for line in out.split("\n"):
            res = re.match(r'\s*(?P<id>\d+)\s+(?P<name>[\w-]+)\s+'
                           r'(?P<flags>[POSRCK-]{6})\s+'
                           r'(?P<value>\d+)\s+(?P<worst>\d+)\s+'
                           r'(?P<thres>\d+)\s+(?P<fail>[\w-]+)\s+'
                           r'(?P<raw_value>\d+)', line)
if not res:
continue
yield res.groupdict()
def dev_exists(dev):
return os.path.exists('/dev/{0}'.format(dev))
def get_filelist(dirname, pattern):
return [f for f in os.listdir(dirname) if re.match(pattern, f)]
def expand_devices(devlist):
expanded_devlist = []
for dev in devlist:
if dev == 'autodetect':
expanded_devlist.extend(get_filelist('/dev', r'^sd[a-z]+$'))
else:
expanded_devlist.append(dev)
return sorted(list(set(expanded_devlist)))
def smartmon_loop(devices, hostname, interval):
while True:
for dev in devices:
if dev_exists(dev):
for attr in SmartDevice(dev).attributes():
print('PUTVAL "{hostname}/smart-{dev}'
'/absolute-{attr_id:03d}_{attr_name}"'
' interval={interval:d} N:{value:d}'
.format(hostname=hostname, dev=dev,
attr_id=int(attr['id']),
attr_name=attr.get('name'),
interval=int(interval),
value=int(attr['raw_value'])))
time.sleep(interval)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dev', nargs='*',
help='devices to check (default: autodetect)')
parser.add_argument('-H', '--hostname', type=str,
help='override hostname provided by collectd',
default=os.environ.get('COLLECTD_HOSTNAME'))
parser.add_argument('-i', '--interval', type=int,
help='override interval provided by collectd',
default=int(float(os.environ.get('COLLECTD_INTERVAL', 300))))
parser.add_argument('-c', '--dont-check-devs',
action='store_true', default=False,
help='do not check devices existence at startup')
args = parser.parse_args()
hostname = (args.hostname
                or subprocess.check_output(['hostname', '-f'],
                                           universal_newlines=True).strip())
if len(hostname) == 0:
parser.error('unable to detect hostname')
interval = max(args.interval, 5)
if len(args.dev) == 0:
devices = expand_devices(['autodetect'])
else:
devices = expand_devices(args.dev)
if not args.dont_check_devs:
for dev in devices:
if not dev_exists(dev):
parser.error('device "/dev/{0}" does not exist'.format(dev))
try:
smartmon_loop(devices, hostname, interval)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
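# Example collectd configuration for running this script via the exec plugin
# (illustrative sketch; the install path, user name and device arguments are
# assumptions -- adjust them to the local setup):
#
#   LoadPlugin exec
#   <Plugin exec>
#       Exec "collectd" "/usr/local/bin/collectd-smartmon.py" "sda" "sdb"
#   </Plugin>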
| 35.76699 | 85 | 0.540717 | [
"MIT"
] | nlm/collectd-smartmon | collectd-smartmon.py | 3,684 | Python |
# -*- coding: utf-8 -*-
from unittest import mock
from vispy.scene.visuals import Image
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved, downsample
import numpy as np
import pytest
@requires_application()
@pytest.mark.parametrize('is_3d', [True, False])
def test_image(is_3d):
"""Test image visual"""
size = (100, 50)
with TestingCanvas(size=size, bgcolor='w') as c:
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
shape = (size[1]-10, size[0]-10) + ((3,) if is_3d else ())
np.random.seed(379823)
data = np.random.rand(*shape)
image.set_data(data)
assert_image_approved(c.render(), "visuals/image%s.png" %
("_rgb" if is_3d else "_mono"))
def _make_test_data(shape, input_dtype):
data = np.random.random_sample(shape)
if data.ndim == 3 and data.shape[-1] == 4:
# RGBA - make alpha fully opaque
data[..., -1] = 1.0
max_val = _max_for_dtype(input_dtype)
if max_val != 1:
data *= max_val
data = data.astype(input_dtype)
return data
def _compare_render(orig_data, rendered_data, previous_render=None, atol=1):
predicted = _make_rgba(orig_data)
np.testing.assert_allclose(rendered_data.astype(float), predicted.astype(float), atol=atol)
if previous_render is not None:
# assert not allclose
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_data, previous_render, atol=10)
def _set_image_data(image, data, should_fail):
if should_fail:
pytest.raises(ValueError, image.set_data, data)
return
image.set_data(data)
def _max_for_dtype(input_dtype):
if np.issubdtype(input_dtype, np.integer):
max_val = np.iinfo(input_dtype).max
else:
max_val = 1.0
return max_val
def _get_orig_and_new_clims(input_dtype):
new_clim = (0.3, 0.8)
max_val = _max_for_dtype(input_dtype)
if np.issubdtype(input_dtype, np.integer):
new_clim = (int(new_clim[0] * max_val), int(new_clim[1] * max_val))
return (0, max_val), new_clim
@requires_application()
@pytest.mark.parametrize('data_on_init', [False, True])
@pytest.mark.parametrize('clim_on_init', [False, True])
@pytest.mark.parametrize('num_channels', [0, 1, 3, 4])
@pytest.mark.parametrize('texture_format', [None, '__dtype__', 'auto'])
@pytest.mark.parametrize('input_dtype', [np.uint8, np.uint16, np.float32, np.float64])
def test_image_clims_and_gamma(input_dtype, texture_format, num_channels,
clim_on_init, data_on_init):
"""Test image visual with clims and gamma on shader."""
size = (40, 40)
if texture_format == '__dtype__':
texture_format = input_dtype
shape = size + (num_channels,) if num_channels > 0 else size
np.random.seed(0)
data = _make_test_data(shape, input_dtype)
orig_clim, new_clim = _get_orig_and_new_clims(input_dtype)
# 16-bit integers and above seem to have precision loss when scaled on the CPU
is_16int_cpu_scaled = (np.dtype(input_dtype).itemsize >= 2 and
np.issubdtype(input_dtype, np.integer) and
texture_format is None)
clim_atol = 2 if is_16int_cpu_scaled else 1
gamma_atol = 3 if is_16int_cpu_scaled else 2
kwargs = {}
if clim_on_init:
kwargs['clim'] = orig_clim
if data_on_init:
kwargs['data'] = data
# default is RGBA, anything except auto requires reformat
set_data_fails = (num_channels != 4 and
texture_format is not None and
texture_format != 'auto')
with TestingCanvas(size=size[::-1], bgcolor="w") as c:
image = Image(cmap='grays', texture_format=texture_format,
parent=c.scene, **kwargs)
if not data_on_init:
_set_image_data(image, data, set_data_fails)
if set_data_fails:
return
rendered = c.render()
_dtype = rendered.dtype
shape_ratio = rendered.shape[0] // data.shape[0]
rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(data, rendered1)
# adjust color limits
image.clim = new_clim
rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
scaled_data = (np.clip(data, new_clim[0], new_clim[1]) - new_clim[0]) / (new_clim[1] - new_clim[0])
_compare_render(scaled_data, rendered2, rendered1, atol=clim_atol)
# adjust gamma
image.gamma = 2
rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(scaled_data ** 2, rendered3, rendered2, atol=gamma_atol)
@requires_application()
def test_image_vertex_updates():
"""Test image visual coordinates are only built when needed."""
size = (40, 40)
with TestingCanvas(size=size, bgcolor="w") as c:
shape = size + (3,)
np.random.seed(0)
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
with mock.patch.object(
image, '_build_vertex_data',
wraps=image._build_vertex_data) as build_vertex_mock:
data = np.random.rand(*shape)
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
build_vertex_mock.reset_mock() # reset the count to 0
# rendering again shouldn't cause vertex coordinates to be built
c.render()
build_vertex_mock.assert_not_called()
# changing to data of the same shape shouldn't cause it
data = np.zeros_like(data)
image.set_data(data)
c.render()
build_vertex_mock.assert_not_called()
# changing to another shape should
data = data[:-5, :-5]
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
def _make_rgba(data_in):
max_val = _max_for_dtype(data_in.dtype)
if data_in.ndim == 3 and data_in.shape[-1] == 1:
data_in = data_in.squeeze()
if data_in.ndim == 2:
out = np.stack([data_in] * 4, axis=2)
out[:, :, 3] = max_val
elif data_in.shape[-1] == 3:
out = np.concatenate((data_in, np.ones((*data_in.shape[:2], 1)) * max_val), axis=2)
else:
out = data_in
    return np.round((out.astype(float) * 255 / max_val)).astype(np.uint8)
run_tests_if_main()
| 36.744444 | 107 | 0.633958 | [
"BSD-3-Clause"
] | 3DAlgoLab/vispy | vispy/visuals/tests/test_image.py | 6,614 | Python |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.model.base_position import BasePosition
from rqalpha.environment import Environment
from rqalpha.const import SIDE, POSITION_EFFECT, DEFAULT_ACCOUNT_TYPE
class FuturePosition(BasePosition):
__abandon_properties__ = []
def __init__(self, order_book_id):
super(FuturePosition, self).__init__(order_book_id)
self._buy_old_holding_list = []
self._sell_old_holding_list = []
self._buy_today_holding_list = []
self._sell_today_holding_list = []
self._buy_transaction_cost = 0.
self._sell_transaction_cost = 0.
self._buy_realized_pnl = 0.
self._sell_realized_pnl = 0.
self._buy_avg_open_price = 0.
self._sell_avg_open_price = 0.
def __repr__(self):
return 'FuturePosition({})'.format(self.__dict__)
def get_state(self):
return {
'order_book_id': self._order_book_id,
'buy_old_holding_list': self._buy_old_holding_list,
'sell_old_holding_list': self._sell_old_holding_list,
'buy_today_holding_list': self._buy_today_holding_list,
'sell_today_holding_list': self._sell_today_holding_list,
'buy_transaction_cost': self._buy_transaction_cost,
'sell_transaction_cost': self._sell_transaction_cost,
'buy_realized_pnl': self._buy_realized_pnl,
'sell_realized_pnl': self._sell_realized_pnl,
'buy_avg_open_price': self._buy_avg_open_price,
'sell_avg_open_price': self._sell_avg_open_price,
# margin rate may change
'margin_rate': self.margin_rate,
}
def set_state(self, state):
assert self._order_book_id == state['order_book_id']
self._buy_old_holding_list = state['buy_old_holding_list']
self._sell_old_holding_list = state['sell_old_holding_list']
self._buy_today_holding_list = state['buy_today_holding_list']
self._sell_today_holding_list = state['sell_today_holding_list']
self._buy_transaction_cost = state['buy_transaction_cost']
self._sell_transaction_cost = state['sell_transaction_cost']
self._buy_avg_open_price = state['buy_avg_open_price']
self._sell_avg_open_price = state['sell_avg_open_price']
@property
def type(self):
return DEFAULT_ACCOUNT_TYPE.FUTURE.name
@property
def margin_rate(self):
env = Environment.get_instance()
margin_info = env.data_proxy.get_margin_info(self.order_book_id)
margin_multiplier = env.config.base.margin_multiplier
return margin_info['long_margin_ratio'] * margin_multiplier
@property
def market_value(self):
return (self.buy_quantity - self.sell_quantity) * self.last_price * self.contract_multiplier
@property
def buy_market_value(self):
return self.buy_quantity * self.last_price * self.contract_multiplier
@property
def sell_market_value(self):
return self.sell_quantity * self.last_price * self.contract_multiplier
    # -- PnL related
@property
def contract_multiplier(self):
return Environment.get_instance().get_instrument(self.order_book_id).contract_multiplier
@property
def open_orders(self):
return Environment.get_instance().broker.get_open_orders(self.order_book_id)
@property
def buy_holding_pnl(self):
"""
        [float] buy-side holding PnL for the current trading day
"""
return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier
@property
def sell_holding_pnl(self):
"""
        [float] sell-side holding PnL for the current trading day
"""
return (self.sell_avg_holding_price - self.last_price) * self.sell_quantity * self.contract_multiplier
@property
def buy_realized_pnl(self):
"""
        [float] buy-side realized PnL from closed positions
"""
return self._buy_realized_pnl
@property
def sell_realized_pnl(self):
"""
        [float] sell-side realized PnL from closed positions
"""
return self._sell_realized_pnl
@property
def holding_pnl(self):
"""
        [float] holding PnL for the current trading day
"""
return self.buy_holding_pnl + self.sell_holding_pnl
@property
def realized_pnl(self):
"""
        [float] realized PnL for the current trading day
"""
return self.buy_realized_pnl + self.sell_realized_pnl
@property
def buy_daily_pnl(self):
"""
        [float] buy-side total PnL for the current trading day
"""
return self.buy_holding_pnl + self.buy_realized_pnl
@property
def sell_daily_pnl(self):
"""
        [float] sell-side total PnL for the current trading day
"""
return self.sell_holding_pnl + self.sell_realized_pnl
@property
def daily_pnl(self):
"""
        [float] total PnL for the current trading day
"""
return self.holding_pnl + self.realized_pnl
@property
def buy_pnl(self):
"""
        [float] cumulative buy-side PnL
"""
return (self.last_price - self._buy_avg_open_price) * self.buy_quantity * self.contract_multiplier
@property
def sell_pnl(self):
"""
        [float] cumulative sell-side PnL
"""
return (self._sell_avg_open_price - self.last_price) * self.sell_quantity * self.contract_multiplier
@property
def pnl(self):
"""
        [float] cumulative PnL
"""
return self.buy_pnl + self.sell_pnl
    # -- Quantity related
@property
def buy_open_order_quantity(self):
"""
        [int] quantity of pending buy orders that open positions
"""
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN)
@property
def sell_open_order_quantity(self):
"""
        [int] quantity of pending sell orders that open positions
"""
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.SELL and order.position_effect == POSITION_EFFECT.OPEN)
@property
def buy_close_order_quantity(self):
"""
        [int] quantity of pending buy orders that close positions
"""
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
@property
def sell_close_order_quantity(self):
"""
        [int] quantity of pending sell orders that close positions
"""
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
@property
def _buy_close_today_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
order.position_effect == POSITION_EFFECT.CLOSE_TODAY)
@property
def _sell_close_today_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
order.position_effect == POSITION_EFFECT.CLOSE_TODAY)
@property
def _closable_today_sell_quantity(self):
return self.sell_today_quantity - self._buy_close_today_order_quantity
@property
def _closable_today_buy_quantity(self):
return self.buy_today_quantity - self._sell_close_today_order_quantity
@property
def buy_old_quantity(self):
"""
        [int] buy-side position carried over from previous trading days
"""
return sum(amount for price, amount in self._buy_old_holding_list)
@property
def sell_old_quantity(self):
"""
        [int] sell-side position carried over from previous trading days
"""
return sum(amount for price, amount in self._sell_old_holding_list)
@property
def buy_today_quantity(self):
"""
        [int] buy-side position opened today
"""
return sum(amount for price, amount in self._buy_today_holding_list)
@property
def sell_today_quantity(self):
"""
        [int] sell-side position opened today
"""
return sum(amount for price, amount in self._sell_today_holding_list)
@property
def buy_quantity(self):
"""
        [int] total buy-side position
"""
return self.buy_old_quantity + self.buy_today_quantity
@property
def sell_quantity(self):
"""
        [int] total sell-side position
"""
return self.sell_old_quantity + self.sell_today_quantity
@property
def closable_buy_quantity(self):
"""
        [float] closable buy-side position
"""
return self.buy_quantity - self.sell_close_order_quantity
@property
def closable_sell_quantity(self):
"""
        [float] closable sell-side position
"""
return self.sell_quantity - self.buy_close_order_quantity
    # -- Margin related
@property
def buy_margin(self):
"""
        [float] margin occupied by the buy-side position
"""
return self._buy_holding_cost * self.margin_rate
@property
def sell_margin(self):
"""
        [float] margin occupied by the sell-side position
"""
return self._sell_holding_cost * self.margin_rate
@property
def margin(self):
"""
        [float] total margin
"""
        # TODO: add handling for the "single-side larger leg" margin rule
return self.buy_margin + self.sell_margin
@property
def buy_avg_holding_price(self):
"""
        [float] average holding price of the buy-side position
"""
return 0 if self.buy_quantity == 0 else self._buy_holding_cost / self.buy_quantity / self.contract_multiplier
@property
def sell_avg_holding_price(self):
"""
        [float] average holding price of the sell-side position
"""
return 0 if self.sell_quantity == 0 else self._sell_holding_cost / self.sell_quantity / self.contract_multiplier
@property
def _buy_holding_cost(self):
return sum(p * a * self.contract_multiplier for p, a in self.buy_holding_list)
@property
def _sell_holding_cost(self):
return sum(p * a * self.contract_multiplier for p, a in self.sell_holding_list)
@property
def buy_holding_list(self):
return self._buy_old_holding_list + self._buy_today_holding_list
@property
def sell_holding_list(self):
return self._sell_old_holding_list + self._sell_today_holding_list
@property
def buy_avg_open_price(self):
return self._buy_avg_open_price
@property
def sell_avg_open_price(self):
return self._sell_avg_open_price
@property
def buy_transaction_cost(self):
return self._buy_transaction_cost
@property
def sell_transaction_cost(self):
return self._sell_transaction_cost
@property
def transaction_cost(self):
return self._buy_transaction_cost + self._sell_transaction_cost
# -- Function
def cal_close_today_amount(self, trade_amount, trade_side):
if trade_side == SIDE.SELL:
close_today_amount = trade_amount - self.buy_old_quantity
else:
close_today_amount = trade_amount - self.sell_old_quantity
return max(close_today_amount, 0)
def apply_settlement(self):
env = Environment.get_instance()
data_proxy = env.data_proxy
trading_date = env.trading_dt.date()
settle_price = data_proxy.get_settle_price(self.order_book_id, trading_date)
self._buy_old_holding_list = [(settle_price, self.buy_quantity)]
self._sell_old_holding_list = [(settle_price, self.sell_quantity)]
self._buy_today_holding_list = []
self._sell_today_holding_list = []
self._buy_transaction_cost = 0.
self._sell_transaction_cost = 0.
self._buy_realized_pnl = 0.
self._sell_realized_pnl = 0.
def _margin_of(self, quantity, price):
env = Environment.get_instance()
instrument = env.data_proxy.instruments(self.order_book_id)
return quantity * instrument.contract_multiplier * price * self.margin_rate
def apply_trade(self, trade):
trade_quantity = trade.last_quantity
if trade.side == SIDE.BUY:
if trade.position_effect == POSITION_EFFECT.OPEN:
self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity +
trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity)
self._buy_transaction_cost += trade.transaction_cost
self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity))
return -1 * self._margin_of(trade_quantity, trade.last_price)
else:
old_margin = self.margin
self._sell_transaction_cost += trade.transaction_cost
delta_realized_pnl = self._close_holding(trade)
self._sell_realized_pnl += delta_realized_pnl
return old_margin - self.margin + delta_realized_pnl
else:
if trade.position_effect == POSITION_EFFECT.OPEN:
self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity +
trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity)
self._sell_transaction_cost += trade.transaction_cost
self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity))
return -1 * self._margin_of(trade_quantity, trade.last_price)
else:
old_margin = self.margin
self._buy_transaction_cost += trade.transaction_cost
delta_realized_pnl = self._close_holding(trade)
self._buy_realized_pnl += delta_realized_pnl
return old_margin - self.margin + delta_realized_pnl
def _close_holding(self, trade):
left_quantity = trade.last_quantity
delta = 0
if trade.side == SIDE.BUY:
            # close the position carried over from previous days first
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0:
old_price, old_quantity = self._sell_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close the position opened today
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._sell_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
else:
            # close the position carried over from previous days first
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0:
old_price, old_quantity = self._buy_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close the position opened today
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._buy_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
left_quantity = 0
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
return delta
def _cal_realized_pnl(self, cost_price, trade_price, side, consumed_quantity):
if side == SIDE.BUY:
return (cost_price - trade_price) * consumed_quantity * self.contract_multiplier
else:
return (trade_price - cost_price) * consumed_quantity * self.contract_multiplier
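# Worked example (illustrative, not part of the original file): closing 2 lots
# of a buy (long) holding opened at 3500 with a sell trade at 3520 on a
# contract multiplier of 10 realizes (3520 - 3500) * 2 * 10 = 400. That is the
# SIDE.SELL branch of _cal_realized_pnl above; buy trades that close a short
# holding use (cost_price - trade_price) instead.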
| 35.285124 | 120 | 0.641293 | [
"Apache-2.0"
] | HackReborn/rqalpha | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | 17,514 | Python |
import asyncio
import functools
import logging
from types import FunctionType, ModuleType
from typing import Type
from prometheus_client import Histogram, Counter
logger = logging.getLogger(__name__)
H = Histogram("management_layer_call_duration_seconds", "API call duration (s)",
              ["call"])
def _prometheus_module_metric_decorator(f: FunctionType):
"""
A Prometheus decorator adding timing metrics to a function.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
"""
module_ = f.__module__.split(".")[-1]
call_key = "{}_{}".format(module_, f.__name__)
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=call_key).time():
if asyncio.iscoroutinefunction(f):
return await f(*args, **kwargs)
else:
return f(*args, **kwargs)
return wrapper
def _prometheus_class_metric_decorator(f: FunctionType):
"""
A Prometheus decorator adding timing metrics to a function in a class.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics
"""
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=f.__name__).time():
if asyncio.iscoroutinefunction(f):
return await f(*args, **kwargs)
else:
return f(*args, **kwargs)
return wrapper
def add_prometheus_metrics_for_module(module_: ModuleType):
"""
Convenience function applying the Prometheus metrics decorator to the
specified module's functions.
:param module_: The module to which the instrumentation will be applied
"""
decorate_all_in_module(module_, _prometheus_module_metric_decorator, [])
def add_prometheus_metrics_for_class(klass: Type):
"""
Convenience function applying the Prometheus metrics decorator to the
specified class functions.
:param klass: The class to which the instrumentation will be applied
"""
decorate_all_in_class(klass, _prometheus_class_metric_decorator, [])
def decorate_all_in_module(module_: ModuleType, decorator: FunctionType, whitelist: list):
"""
Decorate all functions in a module with the specified decorator
:param module_: The module to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
"""
for name in dir(module_):
if name not in whitelist:
obj = getattr(module_, name)
if isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj):
# We only check functions that are defined in the module we
# specified. Some of the functions in the module may have been
# imported from other modules. These are ignored.
if obj.__module__ == module_.__name__:
logger.debug(f"Adding metrics to {module_}:{name}")
setattr(module_, name, decorator(obj))
else:
logger.debug(f"No metrics on {module_}:{name} because it belongs to another "
f"module")
else:
logger.debug(f"No metrics on {module_}:{name} because it is not a coroutine or "
f"function")
def decorate_all_in_class(klass: Type, decorator: FunctionType, whitelist: list):
"""
Decorate all functions in a class with the specified decorator
:param klass: The class to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated.
"""
for name in dir(klass):
if name not in whitelist:
obj = getattr(klass, name)
if isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj):
logger.debug(f"Adding metrics to {klass}:{name}")
setattr(klass, name, decorator(obj))
else:
logger.debug(f"No metrics on {klass}:{name} because it is not a coroutine or "
f"function")
| 38.241379 | 97 | 0.656673 | [
"BSD-3-Clause"
] | girleffect/core-management-layer | management_layer/metrics.py | 4,436 | Python |
from typing import Callable
import numpy as np
from manimlib.utils.bezier import bezier
def linear(t: float) -> float:
return t
def smooth(t: float) -> float:
# Zero first and second derivatives at t=0 and t=1.
# Equivalent to bezier([0, 0, 0, 1, 1, 1])
s = 1 - t
return (t**3) * (10 * s * s + 5 * s * t + t * t)
def rush_into(t: float) -> float:
return 2 * smooth(0.5 * t)
def rush_from(t: float) -> float:
return 2 * smooth(0.5 * (t + 1)) - 1
def slow_into(t: float) -> float:
return np.sqrt(1 - (1 - t) * (1 - t))
def double_smooth(t: float) -> float:
if t < 0.5:
return 0.5 * smooth(2 * t)
else:
return 0.5 * (1 + smooth(2 * t - 1))
def there_and_back(t: float) -> float:
new_t = 2 * t if t < 0.5 else 2 * (1 - t)
return smooth(new_t)
def there_and_back_with_pause(t: float, pause_ratio: float = 1. / 3) -> float:
a = 1. / pause_ratio
if t < 0.5 - pause_ratio / 2:
return smooth(a * t)
elif t < 0.5 + pause_ratio / 2:
return 1
else:
return smooth(a - a * t)
def running_start(t: float, pull_factor: float = -0.5) -> float:
return bezier([0, 0, pull_factor, pull_factor, 1, 1, 1])(t)
def not_quite_there(
func: Callable[[float], float] = smooth,
proportion: float = 0.7
) -> Callable[[float], float]:
def result(t):
return proportion * func(t)
return result
def wiggle(t: float, wiggles: float = 2) -> float:
return there_and_back(t) * np.sin(wiggles * np.pi * t)
def squish_rate_func(
func: Callable[[float], float],
a: float = 0.4,
b: float = 0.6
) -> Callable[[float], float]:
def result(t):
if a == b:
return a
elif t < a:
return func(0)
elif t > b:
return func(1)
else:
return func((t - a) / (b - a))
return result
# Stylistically, should this take parameters (with default values)?
# Ultimately, the functionality is entirely subsumed by squish_rate_func,
# but it may be useful to have a nice name for with nice default params for
# "lingering", different from squish_rate_func's default params
def lingering(t: float) -> float:
return squish_rate_func(lambda t: t, 0, 0.8)(t)
def exponential_decay(t: float, half_life: float = 0.1) -> float:
# The half-life should be rather small to minimize
# the cut-off error at the end
return 1 - np.exp(-t / half_life)
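# Illustrative sketch, not part of the original module: sampling a few of the
# rate functions above, plus a squished variant, over the unit interval.
if __name__ == "__main__":
    delayed = squish_rate_func(smooth, a=0.3, b=0.9)
    for t in np.linspace(0.0, 1.0, 5):
        print(f"t={t:.2f}  smooth={smooth(t):.3f}  "
              f"there_and_back={there_and_back(t):.3f}  "
              f"squished={delayed(t):.3f}")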
| 24.287129 | 78 | 0.593967 | [
"MIT"
] | ListeningPost1379/manim | manimlib/utils/rate_functions.py | 2,453 | Python |
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
symbols = frozenset([pc, x])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = 11
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x <= 10) -> pc' = 1
cond = mgr.LE(x, ints[10])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x <= 10) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 & (x > 6) -> pc' = 2
cond = mgr.GT(x, ints[6])
cfg.append(mgr.Implies(mgr.And(pcs[1], cond), x_pcs[0]))
# pc = 1 & !(x > 6) -> pc' = 0
cfg.append(mgr.Implies(mgr.And(pcs[1], mgr.Not(cond)), x_pcs[0]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same = mgr.Equals(x_x, x)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> same
trans.append(mgr.Implies(pcs[1], same))
# pc = 2 -> x' = x + 2
trans.append(mgr.Implies(pcs[2], mgr.Equals(x_x, mgr.Plus(x, ints[2]))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
symbs = frozenset([pc, x])
i_5 = mgr.Int(5)
x_x = symb_to_next(mgr, x)
loc = Location(env, mgr.Equals(x, i_5))
loc.set_progress(0, mgr.Equals(x_x, x))
h_x = Hint("h_x", env, frozenset([x]), symbs)
h_x.set_locs([loc])
return frozenset([h_x])
| 26.885417 | 77 | 0.573809 | [
"MIT"
] | EnricoMagnago/F3 | benchmarks/software_nontermination/f3_hints/C_Integer/Stroeder_15/Urban-WST2013-Fig1_false-termination.py | 2,581 | Python |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import datetime
import re
import io
from os import PathLike
from typing import (
Dict,
TYPE_CHECKING,
Sequence,
Union,
List,
Optional,
Any,
Callable,
Tuple,
ClassVar,
Type,
overload,
)
from . import utils
from .reaction import Reaction
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .enums import InteractionType, MessageType, ChannelType, try_enum
from .errors import HTTPException
from .components import _component_factory
from .embeds import Embed
from .member import Member
from .flags import MessageFlags
from .file import File
from .utils import escape_mentions, MISSING
from .http import handle_message_parameters
from .guild import Guild
from .mixins import Hashable
from .sticker import StickerItem
from .threads import Thread
from .user import User
from .channel import PartialMessageable
if TYPE_CHECKING:
from typing_extensions import Self
from .types.message import (
Message as MessagePayload,
Attachment as AttachmentPayload,
MessageReference as MessageReferencePayload,
MessageApplication as MessageApplicationPayload,
MessageActivity as MessageActivityPayload,
)
from .types.interactions import MessageInteraction as MessageInteractionPayload
from .types.components import Component as ComponentPayload
from .types.threads import ThreadArchiveDuration
from .types.member import (
Member as MemberPayload,
UserWithMember as UserWithMemberPayload,
)
from .types.user import User as UserPayload
from .types.embed import Embed as EmbedPayload
from .types.gateway import MessageReactionRemoveEvent, MessageUpdateEvent
from .abc import Snowflake
from .abc import GuildChannel, MessageableChannel
from .components import Component
from .state import ConnectionState
from .channel import TextChannel
from .mentions import AllowedMentions
from .user import User
from .role import Role
from .ui.view import View
EmojiInputType = Union[Emoji, PartialEmoji, str]
__all__ = (
'Attachment',
'Message',
'PartialMessage',
'MessageInteraction',
'MessageReference',
'DeletedReferencedMessage',
)
def convert_emoji_reaction(emoji: Union[EmojiInputType, Reaction]) -> str:
if isinstance(emoji, Reaction):
emoji = emoji.emoji
if isinstance(emoji, Emoji):
return f'{emoji.name}:{emoji.id}'
if isinstance(emoji, PartialEmoji):
return emoji._as_reaction()
if isinstance(emoji, str):
# Reactions can be in :name:id format, but not <:name:id>.
# No existing emojis have <> in them, so this should be okay.
return emoji.strip('<>')
raise TypeError(f'emoji argument must be str, Emoji, or Reaction not {emoji.__class__.__name__}.')
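# Illustrative note (added; not in the original source) -- per the branches above:
#   convert_emoji_reaction('👍')            -> '👍'
#   convert_emoji_reaction('<:thonk:123>')  -> ':thonk:123'
# and a custom Emoji named "thonk" with id 123 yields 'thonk:123'.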
class Attachment(Hashable):
"""Represents an attachment from Discord.
.. container:: operations
.. describe:: str(x)
Returns the URL of the attachment.
.. describe:: x == y
Checks if the attachment is equal to another attachment.
.. describe:: x != y
Checks if the attachment is not equal to another attachment.
.. describe:: hash(x)
Returns the hash of the attachment.
.. versionchanged:: 1.7
Attachment can now be casted to :class:`str` and is hashable.
Attributes
------------
id: :class:`int`
The attachment ID.
size: :class:`int`
The attachment size in bytes.
height: Optional[:class:`int`]
The attachment's height, in pixels. Only applicable to images and videos.
width: Optional[:class:`int`]
The attachment's width, in pixels. Only applicable to images and videos.
filename: :class:`str`
The attachment's filename.
url: :class:`str`
The attachment URL. If the message this attachment was attached
to is deleted, then this will 404.
proxy_url: :class:`str`
The proxy URL. This is a cached version of the :attr:`~Attachment.url` in the
case of images. When the message is deleted, this URL might be valid for a few
minutes or not valid at all.
content_type: Optional[:class:`str`]
The attachment's `media type <https://en.wikipedia.org/wiki/Media_type>`_
.. versionadded:: 1.7
description: Optional[:class:`str`]
The attachment's description. Only applicable to images.
.. versionadded:: 2.0
ephemeral: :class:`bool`
Whether the attachment is ephemeral.
.. versionadded:: 2.0
"""
__slots__ = (
'id',
'size',
'height',
'width',
'filename',
'url',
'proxy_url',
'_http',
'content_type',
'description',
'ephemeral',
)
def __init__(self, *, data: AttachmentPayload, state: ConnectionState):
self.id: int = int(data['id'])
self.size: int = data['size']
self.height: Optional[int] = data.get('height')
self.width: Optional[int] = data.get('width')
self.filename: str = data['filename']
self.url: str = data['url']
self.proxy_url: str = data['proxy_url']
self._http = state.http
self.content_type: Optional[str] = data.get('content_type')
self.description: Optional[str] = data.get('description')
self.ephemeral: bool = data.get('ephemeral', False)
def is_spoiler(self) -> bool:
""":class:`bool`: Whether this attachment contains a spoiler."""
return self.filename.startswith('SPOILER_')
def __repr__(self) -> str:
return f'<Attachment id={self.id} filename={self.filename!r} url={self.url!r}>'
def __str__(self) -> str:
return self.url or ''
async def save(
self,
fp: Union[io.BufferedIOBase, PathLike[Any]],
*,
seek_begin: bool = True,
use_cached: bool = False,
) -> int:
"""|coro|
Saves this attachment into a file-like object.
Parameters
-----------
fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
The file-like object to save this attachment to or the filename
to use. If a filename is passed then a file is created with that
filename and used instead.
seek_begin: :class:`bool`
Whether to seek to the beginning of the file after saving is
successfully done.
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
Raises
--------
HTTPException
Saving the attachment failed.
NotFound
The attachment was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
data = await self.read(use_cached=use_cached)
if isinstance(fp, io.BufferedIOBase):
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data)
async def read(self, *, use_cached: bool = False) -> bytes:
"""|coro|
Retrieves the content of this attachment as a :class:`bytes` object.
.. versionadded:: 1.1
Parameters
-----------
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
Raises
------
HTTPException
Downloading the attachment failed.
Forbidden
You do not have permissions to access this attachment
NotFound
The attachment was deleted.
Returns
-------
:class:`bytes`
The contents of the attachment.
"""
url = self.proxy_url if use_cached else self.url
data = await self._http.get_from_cdn(url)
return data
async def to_file(self, *, use_cached: bool = False, spoiler: bool = False) -> File:
"""|coro|
Converts the attachment into a :class:`File` suitable for sending via
:meth:`abc.Messageable.send`.
.. versionadded:: 1.3
Parameters
-----------
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
.. versionadded:: 1.4
spoiler: :class:`bool`
Whether the file is a spoiler.
.. versionadded:: 1.4
Raises
------
HTTPException
Downloading the attachment failed.
Forbidden
You do not have permissions to access this attachment
NotFound
The attachment was deleted.
Returns
-------
:class:`File`
The attachment as a file suitable for sending.
"""
data = await self.read(use_cached=use_cached)
return File(io.BytesIO(data), filename=self.filename, description=self.description, spoiler=spoiler)
def to_dict(self) -> AttachmentPayload:
result: AttachmentPayload = {
'filename': self.filename,
'id': self.id,
'proxy_url': self.proxy_url,
'size': self.size,
'url': self.url,
'spoiler': self.is_spoiler(),
}
if self.height:
result['height'] = self.height
if self.width:
result['width'] = self.width
if self.content_type:
result['content_type'] = self.content_type
if self.description is not None:
result['description'] = self.description
return result
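    # Hedged usage sketch (added for illustration; not part of discord.py):
    # saving every attachment of a received message, falling back to the cached
    # proxy URL when the original URL has already been deleted.
    #
    #   async def archive(message):
    #       for attachment in message.attachments:
    #           try:
    #               await attachment.save(f'archive/{attachment.filename}')
    #           except discord.NotFound:
    #               await attachment.save(f'archive/{attachment.filename}', use_cached=True)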
class DeletedReferencedMessage:
"""A special sentinel type given when the resolved message reference
points to a deleted message.
The purpose of this class is to separate referenced messages that could not be
fetched and those that were previously fetched but have since been deleted.
.. versionadded:: 1.6
"""
__slots__ = ('_parent',)
def __init__(self, parent: MessageReference):
self._parent: MessageReference = parent
def __repr__(self) -> str:
return f"<DeletedReferencedMessage id={self.id} channel_id={self.channel_id} guild_id={self.guild_id!r}>"
@property
def id(self) -> int:
""":class:`int`: The message ID of the deleted referenced message."""
# the parent's message id won't be None here
return self._parent.message_id # type: ignore
@property
def channel_id(self) -> int:
""":class:`int`: The channel ID of the deleted referenced message."""
return self._parent.channel_id
@property
def guild_id(self) -> Optional[int]:
"""Optional[:class:`int`]: The guild ID of the deleted referenced message."""
return self._parent.guild_id
class MessageReference:
"""Represents a reference to a :class:`~discord.Message`.
.. versionadded:: 1.5
.. versionchanged:: 1.6
This class can now be constructed by users.
Attributes
-----------
message_id: Optional[:class:`int`]
The id of the message referenced.
channel_id: :class:`int`
The channel id of the message referenced.
guild_id: Optional[:class:`int`]
The guild id of the message referenced.
fail_if_not_exists: :class:`bool`
Whether replying to the referenced message should raise :class:`HTTPException`
if the message no longer exists or Discord could not fetch the message.
.. versionadded:: 1.7
resolved: Optional[Union[:class:`Message`, :class:`DeletedReferencedMessage`]]
The message that this reference resolved to. If this is ``None``
then the original message was not fetched either due to the Discord API
not attempting to resolve it or it not being available at the time of creation.
If the message was resolved at a prior point but has since been deleted then
this will be of type :class:`DeletedReferencedMessage`.
Currently, this is mainly the replied to message when a user replies to a message.
.. versionadded:: 1.6
"""
__slots__ = ('message_id', 'channel_id', 'guild_id', 'fail_if_not_exists', 'resolved', '_state')
def __init__(self, *, message_id: int, channel_id: int, guild_id: Optional[int] = None, fail_if_not_exists: bool = True):
self._state: Optional[ConnectionState] = None
self.resolved: Optional[Union[Message, DeletedReferencedMessage]] = None
self.message_id: Optional[int] = message_id
self.channel_id: int = channel_id
self.guild_id: Optional[int] = guild_id
self.fail_if_not_exists: bool = fail_if_not_exists
@classmethod
def with_state(cls, state: ConnectionState, data: MessageReferencePayload) -> Self:
self = cls.__new__(cls)
self.message_id = utils._get_as_snowflake(data, 'message_id')
self.channel_id = int(data.pop('channel_id'))
self.guild_id = utils._get_as_snowflake(data, 'guild_id')
self.fail_if_not_exists = data.get('fail_if_not_exists', True)
self._state = state
self.resolved = None
return self
@classmethod
def from_message(cls, message: PartialMessage, *, fail_if_not_exists: bool = True) -> Self:
"""Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.
.. versionadded:: 1.6
Parameters
----------
message: :class:`~discord.Message`
The message to be converted into a reference.
fail_if_not_exists: :class:`bool`
Whether replying to the referenced message should raise :class:`HTTPException`
if the message no longer exists or Discord could not fetch the message.
.. versionadded:: 1.7
Returns
-------
:class:`MessageReference`
A reference to the message.
"""
self = cls(
message_id=message.id,
channel_id=message.channel.id,
guild_id=getattr(message.guild, 'id', None),
fail_if_not_exists=fail_if_not_exists,
)
self._state = message._state
return self
@property
def cached_message(self) -> Optional[Message]:
"""Optional[:class:`~discord.Message`]: The cached message, if found in the internal message cache."""
return self._state and self._state._get_message(self.message_id)
@property
def jump_url(self) -> str:
""":class:`str`: Returns a URL that allows the client to jump to the referenced message.
.. versionadded:: 1.7
"""
guild_id = self.guild_id if self.guild_id is not None else '@me'
return f'https://discord.com/channels/{guild_id}/{self.channel_id}/{self.message_id}'
def __repr__(self) -> str:
return f'<MessageReference message_id={self.message_id!r} channel_id={self.channel_id!r} guild_id={self.guild_id!r}>'
def to_dict(self) -> MessageReferencePayload:
result: Dict[str, Any] = {'message_id': self.message_id} if self.message_id is not None else {}
result['channel_id'] = self.channel_id
if self.guild_id is not None:
result['guild_id'] = self.guild_id
if self.fail_if_not_exists is not None:
result['fail_if_not_exists'] = self.fail_if_not_exists
return result # type: ignore # Type checker doesn't understand these are the same.
to_message_reference_dict = to_dict
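    # Hedged usage sketch (added; not part of discord.py): building a reference
    # by hand to reply to a message you only know by its IDs.
    #
    #   ref = MessageReference(message_id=1234, channel_id=5678, guild_id=9012,
    #                          fail_if_not_exists=False)
    #   await some_channel.send('Continuing the discussion from here:', reference=ref)
    #
    # ``some_channel`` is assumed to be any messageable channel object.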
class MessageInteraction(Hashable):
"""Represents the interaction that a :class:`Message` is a response to.
.. versionadded:: 2.0
.. container:: operations
.. describe:: x == y
Checks if two message interactions are equal.
.. describe:: x != y
Checks if two message interactions are not equal.
.. describe:: hash(x)
Returns the message interaction's hash.
Attributes
-----------
id: :class:`int`
The interaction ID.
type: :class:`InteractionType`
The interaction type.
name: :class:`str`
The name of the interaction.
user: Union[:class:`User`, :class:`Member`]
The user or member that invoked the interaction.
"""
__slots__: Tuple[str, ...] = ('id', 'type', 'name', 'user')
def __init__(self, *, state: ConnectionState, guild: Optional[Guild], data: MessageInteractionPayload) -> None:
self.id: int = int(data['id'])
self.type: InteractionType = try_enum(InteractionType, data['type'])
self.name: str = data['name']
self.user: Union[User, Member] = MISSING
try:
payload = data['member']
except KeyError:
self.user = state.create_user(data['user'])
else:
if guild is None:
# This is an unfortunate data loss, but it's better than giving bad data
# This is also an incredibly rare scenario.
self.user = state.create_user(data['user'])
else:
payload['user'] = data['user']
self.user = Member(data=payload, guild=guild, state=state) # type: ignore
def __repr__(self) -> str:
return f'<MessageInteraction id={self.id} name={self.name!r} type={self.type!r} user={self.user!r}>'
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: The interaction's creation time in UTC."""
return utils.snowflake_time(self.id)
def flatten_handlers(cls: Type[Message]) -> Type[Message]:
prefix = len('_handle_')
handlers = [
(key[prefix:], value)
for key, value in cls.__dict__.items()
if key.startswith('_handle_') and key != '_handle_member'
]
# store _handle_member last
handlers.append(('member', cls._handle_member))
cls._HANDLERS = handlers
cls._CACHED_SLOTS = [attr for attr in cls.__slots__ if attr.startswith('_cs_')]
return cls
class PartialMessage(Hashable):
"""Represents a partial message to aid with working messages when only
a message and channel ID are present.
There are two ways to construct this class. The first one is through
the constructor itself, and the second is via the following:
- :meth:`TextChannel.get_partial_message`
- :meth:`VoiceChannel.get_partial_message`
- :meth:`Thread.get_partial_message`
- :meth:`DMChannel.get_partial_message`
Note that this class is trimmed down and has no rich attributes.
.. versionadded:: 1.6
.. container:: operations
.. describe:: x == y
Checks if two partial messages are equal.
.. describe:: x != y
Checks if two partial messages are not equal.
.. describe:: hash(x)
Returns the partial message's hash.
Attributes
-----------
channel: Union[:class:`PartialMessageable`, :class:`TextChannel`, :class:`VoiceChannel`, :class:`Thread`, :class:`DMChannel`]
The channel associated with this partial message.
id: :class:`int`
The message ID.
guild: Optional[:class:`Guild`]
The guild that the partial message belongs to, if applicable.
"""
__slots__ = ('channel', 'id', '_cs_guild', '_state', 'guild')
def __init__(self, *, channel: MessageableChannel, id: int) -> None:
if not isinstance(channel, PartialMessageable) and channel.type not in (
ChannelType.text,
ChannelType.voice,
ChannelType.news,
ChannelType.private,
ChannelType.news_thread,
ChannelType.public_thread,
ChannelType.private_thread,
):
raise TypeError(
f'expected PartialMessageable, TextChannel, VoiceChannel, DMChannel or Thread not {type(channel)!r}'
)
self.channel: MessageableChannel = channel
self._state: ConnectionState = channel._state
self.id: int = id
self.guild: Optional[Guild] = getattr(channel, 'guild', None)
def _update(self, data: MessageUpdateEvent) -> None:
# This is used for duck typing purposes.
# Just do nothing with the data.
pass
# Also needed for duck typing purposes
# n.b. not exposed
pinned: Any = property(None, lambda x, y: None)
def __repr__(self) -> str:
return f'<PartialMessage id={self.id} channel={self.channel!r}>'
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: The partial message's creation time in UTC."""
return utils.snowflake_time(self.id)
@property
def jump_url(self) -> str:
""":class:`str`: Returns a URL that allows the client to jump to this message."""
guild_id = getattr(self.guild, 'id', '@me')
return f'https://discord.com/channels/{guild_id}/{self.channel.id}/{self.id}'
async def fetch(self) -> Message:
"""|coro|
Fetches the partial message to a full :class:`Message`.
Raises
--------
NotFound
The message was not found.
Forbidden
You do not have the permissions required to get a message.
HTTPException
Retrieving the message failed.
Returns
--------
:class:`Message`
The full message.
"""
data = await self._state.http.get_message(self.channel.id, self.id)
return self._state.create_message(channel=self.channel, data=data)
async def delete(self, *, delay: Optional[float] = None) -> None:
"""|coro|
Deletes the message.
Your own messages could be deleted without any proper permissions. However to
delete other people's messages, you need the :attr:`~Permissions.manage_messages`
permission.
.. versionchanged:: 1.1
Added the new ``delay`` keyword-only parameter.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message. If the deletion fails then it is silently ignored.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
NotFound
The message was deleted already
HTTPException
Deleting the message failed.
"""
if delay is not None:
async def delete(delay: float):
await asyncio.sleep(delay)
try:
await self._state.http.delete_message(self.channel.id, self.id)
except HTTPException:
pass
asyncio.create_task(delete(delay))
else:
await self._state.http.delete_message(self.channel.id, self.id)
@overload
async def edit(
self,
*,
content: Optional[str] = ...,
embed: Optional[Embed] = ...,
attachments: Sequence[Union[Attachment, File]] = ...,
delete_after: Optional[float] = ...,
allowed_mentions: Optional[AllowedMentions] = ...,
view: Optional[View] = ...,
) -> Message:
...
@overload
async def edit(
self,
*,
content: Optional[str] = ...,
embeds: Sequence[Embed] = ...,
attachments: Sequence[Union[Attachment, File]] = ...,
delete_after: Optional[float] = ...,
allowed_mentions: Optional[AllowedMentions] = ...,
view: Optional[View] = ...,
) -> Message:
...
async def edit(
self,
content: Optional[str] = MISSING,
embed: Optional[Embed] = MISSING,
embeds: Sequence[Embed] = MISSING,
attachments: Sequence[Union[Attachment, File]] = MISSING,
delete_after: Optional[float] = None,
allowed_mentions: Optional[AllowedMentions] = MISSING,
view: Optional[View] = MISSING,
) -> Message:
"""|coro|
Edits the message.
The content must be able to be transformed into a string via ``str(content)``.
.. versionchanged:: 2.0
Edits are no longer in-place, the newly edited message is returned instead.
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` instead of
``InvalidArgument``.
Parameters
-----------
content: Optional[:class:`str`]
The new content to replace the message with.
Could be ``None`` to remove the content.
embed: Optional[:class:`Embed`]
The new embed to replace the original with.
Could be ``None`` to remove the embed.
embeds: List[:class:`Embed`]
The new embeds to replace the original with. Must be a maximum of 10.
To remove all embeds ``[]`` should be passed.
.. versionadded:: 2.0
attachments: List[Union[:class:`Attachment`, :class:`File`]]
A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
then all attachments are removed.
.. note::
New files will always appear after current attachments.
.. versionadded:: 2.0
delete_after: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message we just edited. If the deletion fails,
then it is silently ignored.
allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
are used instead.
.. versionadded:: 1.4
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to suppress a message without permissions or
edited a message's content or embed that isn't yours.
TypeError
You specified both ``embed`` and ``embeds``
Returns
--------
:class:`Message`
The newly edited message.
"""
if content is not MISSING:
previous_allowed_mentions = self._state.allowed_mentions
else:
previous_allowed_mentions = None
if view is not MISSING:
self._state.prevent_view_updates_for(self.id)
params = handle_message_parameters(
content=content,
embed=embed,
embeds=embeds,
attachments=attachments,
view=view,
allowed_mentions=allowed_mentions,
previous_allowed_mentions=previous_allowed_mentions,
)
data = await self._state.http.edit_message(self.channel.id, self.id, params=params)
message = Message(state=self._state, channel=self.channel, data=data)
if view and not view.is_finished():
self._state.store_view(view, self.id)
if delete_after is not None:
await self.delete(delay=delete_after)
return message
async def publish(self) -> None:
"""|coro|
Publishes this message to your announcement channel.
You must have the :attr:`~Permissions.send_messages` permission to do this.
If the message is not your own then the :attr:`~Permissions.manage_messages`
permission is also needed.
Raises
-------
Forbidden
You do not have the proper permissions to publish this message.
HTTPException
Publishing the message failed.
"""
await self._state.http.publish_message(self.channel.id, self.id)
async def pin(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Pins the message.
You must have the :attr:`~Permissions.manage_messages` permission to do
this in a non-private channel context.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for pinning the message. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
Forbidden
You do not have permissions to pin the message.
NotFound
The message or channel was not found or deleted.
HTTPException
Pinning the message failed, probably due to the channel
having more than 50 pinned messages.
"""
await self._state.http.pin_message(self.channel.id, self.id, reason=reason)
# pinned exists on PartialMessage for duck typing purposes
self.pinned = True
async def unpin(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Unpins the message.
You must have the :attr:`~Permissions.manage_messages` permission to do
this in a non-private channel context.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for unpinning the message. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
Forbidden
You do not have permissions to unpin the message.
NotFound
The message or channel was not found or deleted.
HTTPException
Unpinning the message failed.
"""
await self._state.http.unpin_message(self.channel.id, self.id, reason=reason)
# pinned exists on PartialMessage for duck typing purposes
self.pinned = False
async def add_reaction(self, emoji: EmojiInputType, /) -> None:
"""|coro|
Adds a reaction to the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You must have the :attr:`~Permissions.read_message_history` permission
to use this. If nobody else has reacted to the message using this
emoji, the :attr:`~Permissions.add_reactions` permission is required.
.. versionchanged:: 2.0
``emoji`` parameter is now positional-only.
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` instead of
``InvalidArgument``.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to react with.
Raises
--------
HTTPException
Adding the reaction failed.
Forbidden
You do not have the proper permissions to react to the message.
NotFound
The emoji you specified was not found.
TypeError
The emoji parameter is invalid.
"""
emoji = convert_emoji_reaction(emoji)
await self._state.http.add_reaction(self.channel.id, self.id, emoji)
async def remove_reaction(self, emoji: Union[EmojiInputType, Reaction], member: Snowflake) -> None:
"""|coro|
Remove a reaction by the member from the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
If the reaction is not your own (i.e. ``member`` parameter is not you) then
the :attr:`~Permissions.manage_messages` permission is needed.
The ``member`` parameter must represent a member and meet
the :class:`abc.Snowflake` abc.
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` instead of
``InvalidArgument``.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to remove.
member: :class:`abc.Snowflake`
The member for which to remove the reaction.
Raises
--------
HTTPException
Removing the reaction failed.
Forbidden
You do not have the proper permissions to remove the reaction.
NotFound
The member or emoji you specified was not found.
TypeError
The emoji parameter is invalid.
"""
emoji = convert_emoji_reaction(emoji)
if member.id == self._state.self_id:
await self._state.http.remove_own_reaction(self.channel.id, self.id, emoji)
else:
await self._state.http.remove_reaction(self.channel.id, self.id, emoji, member.id)
async def clear_reaction(self, emoji: Union[EmojiInputType, Reaction]) -> None:
"""|coro|
Clears a specific reaction from the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You need the :attr:`~Permissions.manage_messages` permission to use this.
.. versionadded:: 1.3
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` instead of
``InvalidArgument``.
Parameters
-----------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to clear.
Raises
--------
HTTPException
Clearing the reaction failed.
Forbidden
You do not have the proper permissions to clear the reaction.
NotFound
The emoji you specified was not found.
TypeError
The emoji parameter is invalid.
"""
emoji = convert_emoji_reaction(emoji)
await self._state.http.clear_single_reaction(self.channel.id, self.id, emoji)
async def clear_reactions(self) -> None:
"""|coro|
Removes all the reactions from the message.
You need the :attr:`~Permissions.manage_messages` permission to use this.
Raises
--------
HTTPException
Removing the reactions failed.
Forbidden
You do not have the proper permissions to remove all the reactions.
"""
await self._state.http.clear_reactions(self.channel.id, self.id)
async def create_thread(
self,
*,
name: str,
auto_archive_duration: ThreadArchiveDuration = MISSING,
slowmode_delay: Optional[int] = None,
reason: Optional[str] = None,
) -> Thread:
"""|coro|
Creates a public thread from this message.
You must have :attr:`~discord.Permissions.create_public_threads` in order to
create a public thread from a message.
The channel this message belongs in must be a :class:`TextChannel`.
.. versionadded:: 2.0
Parameters
-----------
name: :class:`str`
The name of the thread.
auto_archive_duration: :class:`int`
The duration in minutes before a thread is automatically archived for inactivity.
If not provided, the channel's default auto archive duration is used.
slowmode_delay: Optional[:class:`int`]
            Specifies the slowmode rate limit for users in this channel, in seconds.
            The maximum value possible is ``21600``. By default there is no slowmode
            rate limit, in which case this is ``None``.
reason: Optional[:class:`str`]
The reason for creating a new thread. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to create a thread.
HTTPException
Creating the thread failed.
ValueError
This message does not have guild info attached.
Returns
--------
:class:`.Thread`
The created thread.
"""
if self.guild is None:
raise ValueError('This message does not have guild info attached.')
default_auto_archive_duration: ThreadArchiveDuration = getattr(self.channel, 'default_auto_archive_duration', 1440)
data = await self._state.http.start_thread_with_message(
self.channel.id,
self.id,
name=name,
auto_archive_duration=auto_archive_duration or default_auto_archive_duration,
rate_limit_per_user=slowmode_delay,
reason=reason,
)
return Thread(guild=self.guild, state=self._state, data=data)
async def reply(self, content: Optional[str] = None, **kwargs: Any) -> Message:
"""|coro|
A shortcut method to :meth:`.abc.Messageable.send` to reply to the
:class:`.Message`.
.. versionadded:: 1.6
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` or
:exc:`ValueError` instead of ``InvalidArgument``.
Raises
--------
~discord.HTTPException
Sending the message failed.
~discord.Forbidden
You do not have the proper permissions to send the message.
ValueError
The ``files`` list is not of the appropriate size
TypeError
You specified both ``file`` and ``files``.
Returns
---------
:class:`.Message`
The message that was sent.
"""
return await self.channel.send(content, reference=self, **kwargs)
def to_reference(self, *, fail_if_not_exists: bool = True) -> MessageReference:
"""Creates a :class:`~discord.MessageReference` from the current message.
.. versionadded:: 1.6
Parameters
----------
fail_if_not_exists: :class:`bool`
Whether replying using the message reference should raise :class:`HTTPException`
if the message no longer exists or Discord could not fetch the message.
.. versionadded:: 1.7
Returns
---------
:class:`~discord.MessageReference`
The reference to this message.
"""
return MessageReference.from_message(self, fail_if_not_exists=fail_if_not_exists)
def to_message_reference_dict(self) -> MessageReferencePayload:
data: MessageReferencePayload = {
'message_id': self.id,
'channel_id': self.channel.id,
}
if self.guild is not None:
data['guild_id'] = self.guild.id
return data
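    # Hedged usage sketch (added; not part of discord.py): a PartialMessage lets
    # you act on a message when only its ID is known, without fetching it first.
    #
    #   async def pin_by_id(channel, message_id: int) -> None:
    #       partial = channel.get_partial_message(message_id)   # no API call yet
    #       await partial.pin()                                  # single HTTP request
    #       full = await partial.fetch()                         # upgrade to a full Message
    #       await full.reply('Pinned!')
    #
    # ``channel`` is assumed to be a TextChannel, VoiceChannel, Thread or DMChannel.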
@flatten_handlers
class Message(PartialMessage, Hashable):
r"""Represents a message from Discord.
.. container:: operations
.. describe:: x == y
Checks if two messages are equal.
.. describe:: x != y
Checks if two messages are not equal.
.. describe:: hash(x)
Returns the message's hash.
Attributes
-----------
tts: :class:`bool`
Specifies if the message was done with text-to-speech.
This can only be accurately received in :func:`on_message` due to
a discord limitation.
type: :class:`MessageType`
The type of message. In most cases this should not be checked, but it is helpful
in cases where it might be a system message for :attr:`system_content`.
author: Union[:class:`Member`, :class:`abc.User`]
A :class:`Member` that sent the message. If :attr:`channel` is a
        private channel or the user has left the guild, then it is a :class:`User` instead.
content: :class:`str`
The actual contents of the message.
nonce: Optional[Union[:class:`str`, :class:`int`]]
The value used by the discord guild and the client to verify that the message is successfully sent.
This is not stored long term within Discord's servers and is only used ephemerally.
embeds: List[:class:`Embed`]
A list of embeds the message has.
channel: Union[:class:`TextChannel`, :class:`VoiceChannel`, :class:`Thread`, :class:`DMChannel`, :class:`GroupChannel`, :class:`PartialMessageable`]
The :class:`TextChannel` or :class:`Thread` that the message was sent from.
Could be a :class:`DMChannel` or :class:`GroupChannel` if it's a private message.
reference: Optional[:class:`~discord.MessageReference`]
The message that this message references. This is only applicable to messages of
type :attr:`MessageType.pins_add`, crossposted messages created by a
followed channel integration, or message replies.
.. versionadded:: 1.5
mention_everyone: :class:`bool`
Specifies if the message mentions everyone.
.. note::
This does not check if the ``@everyone`` or the ``@here`` text is in the message itself.
Rather this boolean indicates if either the ``@everyone`` or the ``@here`` text is in the message
**and** it did end up mentioning.
mentions: List[:class:`abc.User`]
A list of :class:`Member` that were mentioned. If the message is in a private message
then the list will be of :class:`User` instead. For messages that are not of type
:attr:`MessageType.default`\, this array can be used to aid in system messages.
For more information, see :attr:`system_content`.
.. warning::
The order of the mentions list is not in any particular order so you should
not rely on it. This is a Discord limitation, not one with the library.
channel_mentions: List[Union[:class:`abc.GuildChannel`, :class:`Thread`]]
A list of :class:`abc.GuildChannel` or :class:`Thread` that were mentioned. If the message is
in a private message then the list is always empty.
role_mentions: List[:class:`Role`]
A list of :class:`Role` that were mentioned. If the message is in a private message
then the list is always empty.
id: :class:`int`
The message ID.
webhook_id: Optional[:class:`int`]
        If this message was sent by a webhook, then this is the ID of the webhook that
        sent this message.
attachments: List[:class:`Attachment`]
A list of attachments given to a message.
pinned: :class:`bool`
Specifies if the message is currently pinned.
flags: :class:`MessageFlags`
Extra features of the message.
.. versionadded:: 1.3
reactions : List[:class:`Reaction`]
Reactions to a message. Reactions can be either custom emoji or standard unicode emoji.
activity: Optional[:class:`dict`]
        The activity associated with this message. Sent with Rich Presence-related messages that,
        for example, request joining, spectating, or listening to or with another member.
It is a dictionary with the following optional keys:
- ``type``: An integer denoting the type of message activity being requested.
- ``party_id``: The party ID associated with the party.
application: Optional[:class:`dict`]
The rich presence enabled application associated with this message.
It is a dictionary with the following keys:
- ``id``: A string representing the application's ID.
- ``name``: A string representing the application's name.
- ``description``: A string representing the application's description.
- ``icon``: A string representing the icon ID of the application.
- ``cover_image``: A string representing the embed's image asset ID.
stickers: List[:class:`StickerItem`]
A list of sticker items given to the message.
.. versionadded:: 1.6
components: List[:class:`Component`]
A list of components in the message.
.. versionadded:: 2.0
interaction: Optional[:class:`MessageInteraction`]
The interaction that this message is a response to.
.. versionadded:: 2.0
guild: Optional[:class:`Guild`]
The guild that the message belongs to, if applicable.
"""
__slots__ = (
'_state',
'_edited_timestamp',
'_cs_channel_mentions',
'_cs_raw_mentions',
'_cs_clean_content',
'_cs_raw_channel_mentions',
'_cs_raw_role_mentions',
'_cs_system_content',
'tts',
'content',
'channel',
'webhook_id',
'mention_everyone',
'embeds',
'mentions',
'author',
'attachments',
'nonce',
'pinned',
'role_mentions',
'type',
'flags',
'reactions',
'reference',
'application',
'activity',
'stickers',
'components',
'interaction',
)
if TYPE_CHECKING:
_HANDLERS: ClassVar[List[Tuple[str, Callable[..., None]]]]
_CACHED_SLOTS: ClassVar[List[str]]
# guild: Optional[Guild]
reference: Optional[MessageReference]
mentions: List[Union[User, Member]]
author: Union[User, Member]
role_mentions: List[Role]
def __init__(
self,
*,
state: ConnectionState,
channel: MessageableChannel,
data: MessagePayload,
) -> None:
self.channel: MessageableChannel = channel
self.id: int = int(data['id'])
self._state: ConnectionState = state
self.webhook_id: Optional[int] = utils._get_as_snowflake(data, 'webhook_id')
self.reactions: List[Reaction] = [Reaction(message=self, data=d) for d in data.get('reactions', [])]
self.attachments: List[Attachment] = [Attachment(data=a, state=self._state) for a in data['attachments']]
self.embeds: List[Embed] = [Embed.from_dict(a) for a in data['embeds']]
self.application: Optional[MessageApplicationPayload] = data.get('application')
self.activity: Optional[MessageActivityPayload] = data.get('activity')
self.channel: MessageableChannel = channel
self._edited_timestamp: Optional[datetime.datetime] = utils.parse_time(data['edited_timestamp'])
self.type: MessageType = try_enum(MessageType, data['type'])
self.pinned: bool = data['pinned']
self.flags: MessageFlags = MessageFlags._from_value(data.get('flags', 0))
self.mention_everyone: bool = data['mention_everyone']
self.tts: bool = data['tts']
self.content: str = data['content']
self.nonce: Optional[Union[int, str]] = data.get('nonce')
self.stickers: List[StickerItem] = [StickerItem(data=d, state=state) for d in data.get('sticker_items', [])]
self.components: List[Component] = [_component_factory(d) for d in data.get('components', [])]
try:
# if the channel doesn't have a guild attribute, we handle that
self.guild = channel.guild # type: ignore
except AttributeError:
self.guild = state._get_guild(utils._get_as_snowflake(data, 'guild_id'))
self.interaction: Optional[MessageInteraction] = None
try:
interaction = data['interaction']
except KeyError:
pass
else:
self.interaction = MessageInteraction(state=state, guild=self.guild, data=interaction)
try:
ref = data['message_reference']
except KeyError:
self.reference = None
else:
self.reference = ref = MessageReference.with_state(state, ref)
try:
resolved = data['referenced_message']
except KeyError:
pass
else:
if resolved is None:
ref.resolved = DeletedReferencedMessage(ref)
else:
# Right now the channel IDs match but maybe in the future they won't.
if ref.channel_id == channel.id:
chan = channel
elif isinstance(channel, Thread) and channel.parent_id == ref.channel_id:
chan = channel
else:
chan, _ = state._get_guild_channel(resolved, ref.guild_id)
# the channel will be the correct type here
ref.resolved = self.__class__(channel=chan, data=resolved, state=state) # type: ignore
for handler in ('author', 'member', 'mentions', 'mention_roles'):
try:
getattr(self, f'_handle_{handler}')(data[handler])
except KeyError:
continue
def __repr__(self) -> str:
name = self.__class__.__name__
return (
f'<{name} id={self.id} channel={self.channel!r} type={self.type!r} author={self.author!r} flags={self.flags!r}>'
)
def _try_patch(self, data, key, transform=None) -> None:
try:
value = data[key]
except KeyError:
pass
else:
if transform is None:
setattr(self, key, value)
else:
setattr(self, key, transform(value))
def _add_reaction(self, data, emoji, user_id) -> Reaction:
reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
is_me = data['me'] = user_id == self._state.self_id
if reaction is None:
reaction = Reaction(message=self, data=data, emoji=emoji)
self.reactions.append(reaction)
else:
reaction.count += 1
if is_me:
reaction.me = is_me
return reaction
def _remove_reaction(self, data: MessageReactionRemoveEvent, emoji: EmojiInputType, user_id: int) -> Reaction:
reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
if reaction is None:
# already removed?
raise ValueError('Emoji already removed?')
# if reaction isn't in the list, we crash. This means discord
# sent bad data, or we stored improperly
reaction.count -= 1
if user_id == self._state.self_id:
reaction.me = False
if reaction.count == 0:
# this raises ValueError if something went wrong as well.
self.reactions.remove(reaction)
return reaction
def _clear_emoji(self, emoji: PartialEmoji) -> Optional[Reaction]:
to_check = str(emoji)
for index, reaction in enumerate(self.reactions):
if str(reaction.emoji) == to_check:
break
else:
# didn't find anything so just return
return
del self.reactions[index]
return reaction
def _update(self, data: MessageUpdateEvent) -> None:
# In an update scheme, 'author' key has to be handled before 'member'
# otherwise they overwrite each other which is undesirable.
# Since there's no good way to do this we have to iterate over every
# handler rather than iterating over the keys which is a little slower
for key, handler in self._HANDLERS:
try:
value = data[key]
except KeyError:
continue
else:
handler(self, value)
# clear the cached properties
for attr in self._CACHED_SLOTS:
try:
delattr(self, attr)
except AttributeError:
pass
def _handle_edited_timestamp(self, value: str) -> None:
self._edited_timestamp = utils.parse_time(value)
def _handle_pinned(self, value: bool) -> None:
self.pinned = value
def _handle_flags(self, value: int) -> None:
self.flags = MessageFlags._from_value(value)
def _handle_application(self, value: MessageApplicationPayload) -> None:
self.application = value
def _handle_activity(self, value: MessageActivityPayload) -> None:
self.activity = value
def _handle_mention_everyone(self, value: bool) -> None:
self.mention_everyone = value
def _handle_tts(self, value: bool) -> None:
self.tts = value
def _handle_type(self, value: int) -> None:
self.type = try_enum(MessageType, value)
def _handle_content(self, value: str) -> None:
self.content = value
def _handle_attachments(self, value: List[AttachmentPayload]) -> None:
self.attachments = [Attachment(data=a, state=self._state) for a in value]
def _handle_embeds(self, value: List[EmbedPayload]) -> None:
self.embeds = [Embed.from_dict(data) for data in value]
def _handle_nonce(self, value: Union[str, int]) -> None:
self.nonce = value
def _handle_author(self, author: UserPayload) -> None:
self.author = User(state=self._state, data=author)
def _handle_member(self, member: MemberPayload) -> None:
member["user"] = self.author._to_minimal_user_json()
self.author = Member(data=member, guild=self.guild, state=self._state)
def _handle_mentions(self, mentions: List[UserWithMemberPayload]) -> None:
self.mentions = r = []
guild = self.guild
state = self._state
if not isinstance(guild, Guild):
self.mentions = [state.store_user(m) for m in mentions]
return
for mention in filter(None, mentions):
id_search = int(mention['id'])
member = guild.get_member(id_search)
if member is not None:
r.append(member)
else:
r.append(Member._try_upgrade(data=mention, guild=guild, state=state))
def _handle_mention_roles(self, role_mentions: List[int]) -> None:
self.role_mentions = []
if isinstance(self.guild, Guild):
for role_id in map(int, role_mentions):
role = self.guild.get_role(role_id)
if role is not None:
self.role_mentions.append(role)
def _handle_components(self, components: List[ComponentPayload]):
pass
def _handle_interaction(self, data: MessageInteractionPayload):
self.interaction = MessageInteraction(state=self._state, guild=self.guild, data=data)
def _rebind_cached_references(self, new_guild: Guild, new_channel: Union[TextChannel, Thread]) -> None:
self.guild = new_guild
self.channel = new_channel
@utils.cached_slot_property('_cs_raw_mentions')
def raw_mentions(self) -> List[int]:
"""List[:class:`int`]: A property that returns an array of user IDs matched with
the syntax of ``<@user_id>`` in the message content.
This allows you to receive the user IDs of mentioned users
even in a private message context.
"""
return [int(x) for x in re.findall(r'<@!?([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_raw_channel_mentions')
def raw_channel_mentions(self) -> List[int]:
"""List[:class:`int`]: A property that returns an array of channel IDs matched with
the syntax of ``<#channel_id>`` in the message content.
"""
return [int(x) for x in re.findall(r'<#([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_raw_role_mentions')
def raw_role_mentions(self) -> List[int]:
"""List[:class:`int`]: A property that returns an array of role IDs matched with
the syntax of ``<@&role_id>`` in the message content.
"""
return [int(x) for x in re.findall(r'<@&([0-9]{15,20})>', self.content)]
@utils.cached_slot_property('_cs_channel_mentions')
def channel_mentions(self) -> List[Union[GuildChannel, Thread]]:
if self.guild is None:
return []
it = filter(None, map(self.guild._resolve_channel, self.raw_channel_mentions))
return utils._unique(it)
@utils.cached_slot_property('_cs_clean_content')
def clean_content(self) -> str:
""":class:`str`: A property that returns the content in a "cleaned up"
manner. This basically means that mentions are transformed
into the way the client shows it. e.g. ``<#id>`` will transform
into ``#name``.
This will also transform @everyone and @here mentions into
non-mentions.
.. note::
This *does not* affect markdown. If you want to escape
or remove markdown then use :func:`utils.escape_markdown` or :func:`utils.remove_markdown`
respectively, along with this function.
"""
if self.guild:
def resolve_member(id: int) -> str:
m = self.guild.get_member(id) or utils.get(self.mentions, id=id) # type: ignore
return f'@{m.display_name}' if m else '@deleted-user'
def resolve_role(id: int) -> str:
r = self.guild.get_role(id) or utils.get(self.role_mentions, id=id) # type: ignore
return f'@{r.name}' if r else '@deleted-role'
def resolve_channel(id: int) -> str:
c = self.guild._resolve_channel(id) # type: ignore
return f'#{c.name}' if c else '#deleted-channel'
else:
def resolve_member(id: int) -> str:
m = utils.get(self.mentions, id=id)
return f'@{m.display_name}' if m else '@deleted-user'
def resolve_role(id: int) -> str:
return '@deleted-role'
def resolve_channel(id: int) -> str:
                return '#deleted-channel'
transforms = {
'@': resolve_member,
'@!': resolve_member,
'#': resolve_channel,
'@&': resolve_role,
}
def repl(match: re.Match) -> str:
type = match[1]
id = int(match[2])
transformed = transforms[type](id)
return transformed
result = re.sub(r'<(@[!&]?|#)([0-9]{15,20})>', repl, self.content)
return escape_mentions(result)
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: The message's creation time in UTC."""
return utils.snowflake_time(self.id)
@property
def edited_at(self) -> Optional[datetime.datetime]:
"""Optional[:class:`datetime.datetime`]: An aware UTC datetime object containing the edited time of the message."""
return self._edited_timestamp
def is_system(self) -> bool:
""":class:`bool`: Whether the message is a system message.
A system message is a message that is constructed entirely by the Discord API
in response to something.
.. versionadded:: 1.3
"""
return self.type not in (
MessageType.default,
MessageType.reply,
MessageType.chat_input_command,
MessageType.context_menu_command,
MessageType.thread_starter_message,
)
@utils.cached_slot_property('_cs_system_content')
def system_content(self) -> Optional[str]:
r""":class:`str`: A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default` and :attr:`MessageType.reply`\,
this just returns the regular :attr:`Message.content`. Otherwise this
returns an English message denoting the contents of the system message.
"""
if self.type is MessageType.default:
return self.content
if self.type is MessageType.recipient_add:
if self.channel.type is ChannelType.group:
return f'{self.author.name} added {self.mentions[0].name} to the group.'
else:
return f'{self.author.name} added {self.mentions[0].name} to the thread.'
if self.type is MessageType.recipient_remove:
if self.channel.type is ChannelType.group:
return f'{self.author.name} removed {self.mentions[0].name} from the group.'
else:
return f'{self.author.name} removed {self.mentions[0].name} from the thread.'
if self.type is MessageType.channel_name_change:
return f'{self.author.name} changed the channel name: **{self.content}**'
if self.type is MessageType.channel_icon_change:
return f'{self.author.name} changed the channel icon.'
if self.type is MessageType.pins_add:
return f'{self.author.name} pinned a message to this channel.'
if self.type is MessageType.new_member:
formats = [
"{0} joined the party.",
"{0} is here.",
"Welcome, {0}. We hope you brought pizza.",
"A wild {0} appeared.",
"{0} just landed.",
"{0} just slid into the server.",
"{0} just showed up!",
"Welcome {0}. Say hi!",
"{0} hopped into the server.",
"Everyone welcome {0}!",
"Glad you're here, {0}.",
"Good to see you, {0}.",
"Yay you made it, {0}!",
]
created_at_ms = int(self.created_at.timestamp() * 1000)
return formats[created_at_ms % len(formats)].format(self.author.name)
if self.type is MessageType.premium_guild_subscription:
if not self.content:
return f'{self.author.name} just boosted the server!'
else:
return f'{self.author.name} just boosted the server **{self.content}** times!'
if self.type is MessageType.premium_guild_tier_1:
if not self.content:
return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 1!**'
else:
return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 1!**'
if self.type is MessageType.premium_guild_tier_2:
if not self.content:
return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 2!**'
else:
return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 2!**'
if self.type is MessageType.premium_guild_tier_3:
if not self.content:
return f'{self.author.name} just boosted the server! {self.guild} has achieved **Level 3!**'
else:
return f'{self.author.name} just boosted the server **{self.content}** times! {self.guild} has achieved **Level 3!**'
if self.type is MessageType.channel_follow_add:
return (
f'{self.author.name} has added {self.content} to this channel. Its most important updates will show up here.'
)
if self.type is MessageType.guild_stream:
# the author will be a Member
return f'{self.author.name} is live! Now streaming {self.author.activity.name}' # type: ignore
if self.type is MessageType.guild_discovery_disqualified:
return 'This server has been removed from Server Discovery because it no longer passes all the requirements. Check Server Settings for more details.'
if self.type is MessageType.guild_discovery_requalified:
return 'This server is eligible for Server Discovery again and has been automatically relisted!'
if self.type is MessageType.guild_discovery_grace_period_initial_warning:
return 'This server has failed Discovery activity requirements for 1 week. If this server fails for 4 weeks in a row, it will be automatically removed from Discovery.'
if self.type is MessageType.guild_discovery_grace_period_final_warning:
return 'This server has failed Discovery activity requirements for 3 weeks in a row. If this server fails for 1 more week, it will be removed from Discovery.'
if self.type is MessageType.thread_created:
return f'{self.author.name} started a thread: **{self.content}**. See all **threads**.'
if self.type is MessageType.reply:
return self.content
if self.type is MessageType.thread_starter_message:
if self.reference is None or self.reference.resolved is None:
return 'Sorry, we couldn\'t load the first message in this thread'
# the resolved message for the reference will be a Message
return self.reference.resolved.content # type: ignore
if self.type is MessageType.guild_invite_reminder:
return 'Wondering who to invite?\nStart by inviting anyone who can help you build the server!'
@overload
async def edit(
self,
*,
content: Optional[str] = ...,
embed: Optional[Embed] = ...,
attachments: Sequence[Union[Attachment, File]] = ...,
suppress: bool = ...,
delete_after: Optional[float] = ...,
allowed_mentions: Optional[AllowedMentions] = ...,
view: Optional[View] = ...,
) -> Message:
...
@overload
async def edit(
self,
*,
content: Optional[str] = ...,
embeds: Sequence[Embed] = ...,
attachments: Sequence[Union[Attachment, File]] = ...,
suppress: bool = ...,
delete_after: Optional[float] = ...,
allowed_mentions: Optional[AllowedMentions] = ...,
view: Optional[View] = ...,
) -> Message:
...
async def edit(
self,
content: Optional[str] = MISSING,
embed: Optional[Embed] = MISSING,
embeds: Sequence[Embed] = MISSING,
attachments: Sequence[Union[Attachment, File]] = MISSING,
suppress: bool = False,
delete_after: Optional[float] = None,
allowed_mentions: Optional[AllowedMentions] = MISSING,
view: Optional[View] = MISSING,
) -> Message:
"""|coro|
Edits the message.
The content must be able to be transformed into a string via ``str(content)``.
.. versionchanged:: 1.3
The ``suppress`` keyword-only parameter was added.
.. versionchanged:: 2.0
Edits are no longer in-place, the newly edited message is returned instead.
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` instead of
``InvalidArgument``.
Parameters
-----------
content: Optional[:class:`str`]
The new content to replace the message with.
Could be ``None`` to remove the content.
embed: Optional[:class:`Embed`]
The new embed to replace the original with.
Could be ``None`` to remove the embed.
embeds: List[:class:`Embed`]
The new embeds to replace the original with. Must be a maximum of 10.
To remove all embeds ``[]`` should be passed.
.. versionadded:: 2.0
attachments: List[Union[:class:`Attachment`, :class:`File`]]
A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
then all attachments are removed.
.. note::
New files will always appear after current attachments.
.. versionadded:: 2.0
suppress: :class:`bool`
Whether to suppress embeds for the message. This removes
all the embeds if set to ``True``. If set to ``False``
this brings the embeds back if they were suppressed.
Using this parameter requires :attr:`~.Permissions.manage_messages`.
delete_after: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message we just edited. If the deletion fails,
then it is silently ignored.
allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
are used instead.
.. versionadded:: 1.4
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to suppress a message without permissions or
edited a message's content or embed that isn't yours.
TypeError
You specified both ``embed`` and ``embeds``
Returns
--------
:class:`Message`
The newly edited message.
"""
if content is not MISSING:
previous_allowed_mentions = self._state.allowed_mentions
else:
previous_allowed_mentions = None
if suppress is not MISSING:
flags = MessageFlags._from_value(self.flags.value)
flags.suppress_embeds = suppress
else:
flags = MISSING
if view is not MISSING:
self._state.prevent_view_updates_for(self.id)
params = handle_message_parameters(
content=content,
flags=flags,
embed=embed,
embeds=embeds,
attachments=attachments,
view=view,
allowed_mentions=allowed_mentions,
previous_allowed_mentions=previous_allowed_mentions,
)
data = await self._state.http.edit_message(self.channel.id, self.id, params=params)
message = Message(state=self._state, channel=self.channel, data=data)
if view and not view.is_finished():
self._state.store_view(view, self.id)
if delete_after is not None:
await self.delete(delay=delete_after)
return message
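    # Example usage from application code (sketch; the channel/message names are hypothetical,
    # but the parameters shown are the ones documented above):
    #   message = await channel.send('hello')
    #   message = await message.edit(content='hello, world', suppress=True)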
async def add_files(self, *files: File) -> Message:
r"""|coro|
Adds new files to the end of the message attachments.
.. versionadded:: 2.0
Parameters
-----------
\*files: :class:`File`
New files to add to the message.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to edit a message that isn't yours.
Returns
--------
:class:`Message`
The newly edited message.
"""
return await self.edit(attachments=[*self.attachments, *files])
async def remove_attachments(self, *attachments: Attachment) -> Message:
r"""|coro|
Removes attachments from the message.
.. versionadded:: 2.0
Parameters
-----------
\*attachments: :class:`Attachment`
Attachments to remove from the message.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to edit a message that isn't yours.
Returns
--------
:class:`Message`
The newly edited message.
"""
return await self.edit(attachments=[a for a in self.attachments if a not in attachments])
| 36.239823 | 179 | 0.613306 | ["MIT"] | NQN-Discord/discord.py | discord/message.py | 73,893 | Python
# -*- coding: utf-8 -*-
"""Utilities for calculation job resources."""
__all__ = (
'get_default_options',
'seconds_to_timelimit',
)
def get_default_options(max_num_machines: int = 1, max_wallclock_seconds: int = 1800, with_mpi: bool = False) -> dict:
"""Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.
:param max_num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param with_mpi: whether to run the calculation with MPI enabled
"""
return {
'resources': {
'num_machines': int(max_num_machines)
},
'max_wallclock_seconds': int(max_wallclock_seconds),
'withmpi': with_mpi,
}
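# Example (sketch): get_default_options(2, 3600, True) returns
# {'resources': {'num_machines': 2}, 'max_wallclock_seconds': 3600, 'withmpi': True}.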
def seconds_to_timelimit(seconds: int) -> str:
"""Convert seconds into a Slum-notation time limit for the ABINIT flag `--timelimit`.
:param seconds: time limit in seconds
:returns: Slurm-notation time limit (hours:minutes:seconds)
"""
days = seconds // 86400
seconds -= days * 86400
hours = seconds // 3600
seconds -= hours * 3600
minutes = seconds // 60
seconds -= minutes * 60
timelimit = ''
if days > 0:
timelimit += f'{days}-'
if hours > 0:
timelimit += f'{hours:02d}:'
timelimit += f'{minutes:02d}:{seconds:02d}'
return timelimit
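# Example (sketch): seconds_to_timelimit(3750) returns '01:02:30' and
# seconds_to_timelimit(93784) returns '1-02:03:04'.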
| 31.355556 | 118 | 0.653437 | ["MIT"] | azadoks/aiida-abinit | aiida_abinit/utils/resources.py | 1,411 | Python
"""On-premise Gitlab clients
"""
# from .v4 import *
| 13.25 | 28 | 0.641509 | ["BSD-3-Clause"] | shwetagopaul92/tapis-cli-ng | tapis_cli/clients/services/gitlab/__init__.py | 53 | Python
import json
import re
class FieldValidationException(Exception):
pass
class Field(object):
"""
This is the base class that should be used to create field validators. Sub-class this and override to_python if you
need custom validation.
"""
DATA_TYPE_STRING = 'string'
DATA_TYPE_NUMBER = 'number'
DATA_TYPE_BOOLEAN = 'boolean'
def get_data_type(self):
"""
Get the type of the field.
"""
return Field.DATA_TYPE_STRING
def __init__(self, name, title, description, required_on_create=True, required_on_edit=False):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human-readable description of the field
(e.g. "The IP or domain name of the database server")
required_on_create -- If "true", the parameter is required on input stanza creation.
required_on_edit -- If "true", the parameter is required on input stanza modification.
Default values for required_on_create and required_on_edit match the
documented behavior at http://docs.splunk.com/Documentation/Splunk/latest/AdvancedDev/ModInputsScripts.
"""
# Note: there is no distinction between a None value and blank value,
# as modular input UIs does not recognize such a distinction.
if name is None or len(name.strip()) == 0:
raise ValueError("The name parameter cannot be empty.")
if title is None or len(title.strip()) == 0:
raise ValueError("The title parameter cannot be empty.")
if description is None or len(description.strip()) == 0:
raise ValueError("The description parameter cannot be empty.")
self.name = name
self.title = title
self.description = description
self.required_on_create = required_on_create
self.required_on_edit = required_on_edit
def to_python(self, value):
"""
Convert the field to a Python object. Should throw a FieldValidationException if the data is invalid.
Arguments:
value -- The value to convert
"""
# No standard validation here; the modular input framework handles empty values.
return value
def to_string(self, value):
"""
Convert the field to a string value that can be returned. Should throw a FieldValidationException if the data is
invalid.
Arguments:
value -- The value to convert
"""
return str(value)
class BooleanField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value in [True, False]:
return value
elif str(value).strip().lower() in ["true", "t", "1"]:
return True
elif str(value).strip().lower() in ["false", "f", "0"]:
return False
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid boolean" % (str(value), self.name))
def to_string(self, value):
if value is True:
return "1"
elif value is False:
return "0"
return str(value)
def get_data_type(self):
return Field.DATA_TYPE_BOOLEAN
class DelimitedField(Field):
def __init__(self, name, title, description, delim, required_on_create=True, required_on_edit=False):
super(DelimitedField, self).__init__(name, title, description, required_on_create, required_on_edit)
self._delim = delim
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
tmp = value.split(self._delim)
return tmp
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_STRING
class DurationField(Field):
"""
    The duration field represents a duration expressed as a string, such as 1d for a 24 hour period.
The string is converted to an integer indicating the number of seconds.
"""
DURATION_RE = re.compile(r"(?P<duration>[0-9]+)\s*(?P<units>[a-z]*)", re.IGNORECASE)
MINUTE = 60
HOUR = 3600
DAY = 86400
WEEK = 604800
UNITS = {
'w': WEEK, 'week': WEEK, 'd': DAY, 'day': DAY, 'h': HOUR, 'hour': HOUR, 'm': MINUTE, 'min': MINUTE, 'minute':
MINUTE, 's': 1}
def to_python(self, value):
Field.to_python(self, value)
# Parse the duration
m = DurationField.DURATION_RE.match(value)
# Make sure the duration could be parsed
if m is None:
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid duration" % (str(value), self.name))
# Get the units and duration
d = m.groupdict()
units = d['units']
# Parse the value provided
try:
duration = int(d['duration'])
except ValueError:
raise FieldValidationException(
"The duration '%s' for the '%s' parameter is not a valid number" % (d['duration'], self.name))
# Make sure the units are valid
if len(units) > 0 and units not in DurationField.UNITS:
raise FieldValidationException(
"The unit '%s' for the '%s' parameter is not a valid unit of duration" % (units, self.name))
# Convert the units to seconds
if len(units) > 0:
return duration * DurationField.UNITS[units]
else:
return duration
def to_string(self, value):
return str(value)
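    # Example (sketch): to_python('2h') returns 7200 and to_python('90s') returns 90;
    # an unrecognized unit such as '5x' raises FieldValidationException.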
class FloatField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return float(value)
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class IntegerField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return int(value)
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class IntervalField(Field):
'''Class for handling Splunk's "interval" field, which typically accepts
an integer value OR a cron-style string. Note that this means that the
data type returned is a string, so the modular input must handle conversion
of this string to an integer at runtime.'''
# Accepted cron field formats:
# Asterisk: * (equivalent to first-last range)
# Lists: 1,2,3,4,5
# Ranges: 1-60
#
# and combinations of the above:
#
# Ranges followed by steps: 0-23/2
# Asterisks followed by steps: */2
#
# Note that we don't check explicitly for correct numeric values for each
# cron field.
cron_rx = re.compile(
r'''
(
\d{1,2} # A digit.
|\d{1,2}-\d{1,2} # A range.
|(\d{1,2},)+\d{1,2} # A list of digits.
|\d{1,2}-\d{1,2}/\d{1,2} # A range followed by a step.
|\* # The asterisk character.
|\*/\d{1,2} # An asterisk followed by a step.
)
''', re.VERBOSE)
def to_python(self, value):
try:
# Try parsing the string as an integer.
return int(value)
except ValueError:
# Try parsing the string as a cron schedule.
if self.parse_cron(value):
return value
raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
value, self.name))
def get_data_type(self):
return Field.DATA_TYPE_STRING
def parse_cron(self, value):
'''Check for valid cron string.'''
fields = value.split()
if len(fields) == 5 and all([self.cron_rx.match(i) for i in fields]):
return True
return False
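    # Example (sketch): to_python('300') returns the integer 300, while
    # to_python('*/5 * * * *') is accepted and returned unchanged as a cron string.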
class JsonField(Field):
def to_python(self, value):
Field.to_python(self, value)
try:
return json.loads(value)
except (TypeError, ValueError):
raise FieldValidationException(
"The value of '%s' for the '%s' parameter is not a valid JSON object" % (str(value), self.name))
def to_string(self, value):
return str(value)
def get_data_type(self):
return Field.DATA_TYPE_STRING
class ListField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
return value.split(",")
else:
return []
def to_string(self, value):
if value is not None:
return ",".join(value)
return ""
class RangeField(Field):
def __init__(self, name, title, description, low, high, required_on_create=True, required_on_edit=False):
super(RangeField, self).__init__(name, title, description, required_on_create, required_on_edit)
self.low = low
self.high = high
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
tmp = int(value)
if tmp >= self.low and tmp <= self.high:
return tmp
else:
raise FieldValidationException("Value out of range.")
except ValueError as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class RegexField(Field):
def to_python(self, value):
Field.to_python(self, value)
if value is not None:
try:
return re.compile(value)
except Exception as e:
raise FieldValidationException(str(e))
else:
return None
def to_string(self, value):
if value is not None:
return value.pattern
return ""
class SeverityField(Field):
# Note: We ignore "FATAL" severity since Python's logging assigns it the
# same value as "CRITICAL".
SEVERITIES = {'DEBUG': 10, 'INFO': 20, 'WARN': 30, 'ERROR': 40, 'CRITICAL': 50}
SEVERITIES_BY_INT = {v: k for k, v in SEVERITIES.items()}
def to_python(self, value):
try:
if value in SeverityField.SEVERITIES:
return SeverityField.SEVERITIES[value]
except AttributeError:
# Did not receive a string for some reason.
pass
raise FieldValidationException("The value of '{}' for the '{}' parameter is not a valid value".format(
value, self.name))
def to_string(self, value):
if value in SeverityField.SEVERITIES_BY_INT:
return SeverityField.SEVERITIES_BY_INT[value]
else:
raise ValueError('Invalid value provided for severity.')
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class VerbosityField(Field):
def to_python(self, value):
Field.to_python(self, value)
value = int(value)
if value is not None:
if value in [10, 20, 30, 40, 50]:
return value
else:
raise FieldValidationException('Invalid value provided for verbosity, must be one of the following: ' +
'{10, 20, 30, 40, 50}')
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
| 28.426637 | 120 | 0.58334 | ["Apache-2.0"] | kamaljitsingh76/eventgen | splunk_eventgen/splunk_app/lib/mod_input/fields.py | 12,593 | Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Homer Strong, Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the "hashing trick" [1]_ -- a mapping between words and their integer ids
using a fixed and static mapping.
Notes
-----
The static mapping has a constant memory footprint, regardless of the number of word-types (features) in your corpus,
so it's suitable for processing extremely large corpora. The ids are computed as `hash(word) % id_range`,
where `hash` is a user-configurable function (`zlib.adler32` by default).
Advantages:
* New words can be represented immediately, without an extra pass through the corpus
to collect all the ids first.
* Can be used with non-repeatable (once-only) streams of documents.
* All tokens can be represented (not only those already seen in documents), which is a
  typical limitation of :class:`~gensim.corpora.dictionary.Dictionary`.
Disadvantages:
* Words may map to the same id, causing hash collisions. The word <-> id mapping is no longer a bijection.
References
----------
.. [1] http://en.wikipedia.org/wiki/Hashing-Trick
"""
from __future__ import with_statement
import logging
import itertools
import zlib
from gensim import utils
from six import iteritems, iterkeys
logger = logging.getLogger(__name__)
class HashDictionary(utils.SaveLoad, dict):
"""Encapsulates the mapping between normalized words and their integer ids.
Notes
-----
Unlike :class:`~gensim.corpora.dictionary.Dictionary`,
building a :class:`~gensim.corpora.hashdictionary.HashDictionary` before using it **isn't a necessary step**.
The documents can be computed immediately, from an uninitialized
:class:`~gensim.corpora.hashdictionary.HashDictionary` without seeing the rest of the corpus first.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> texts = [['human', 'interface', 'computer']]
>>> dct = HashDictionary(texts)
>>> dct.doc2bow(texts[0])
[(10608, 1), (12466, 1), (31002, 1)]
"""
def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
"""
Parameters
----------
documents : iterable of iterable of str
Iterable of documents, if given - use them to initialization.
id_range : int, optional
Number of hash-values in table, used as `id = myhash(key) % id_range`.
myhash : function
Hash function, should support interface myhash(str) -> int, used `zlib.adler32` by default.
debug : bool
If True - store raw tokens mapping (as str <-> id).
If you find yourself running out of memory (or not sure that you really need raw tokens), set `debug=False`.
"""
self.myhash = myhash # hash fnc: string->integer
self.id_range = id_range # hash range: id = myhash(key) % id_range
self.debug = debug
# the following (potentially massive!) dictionaries are only formed if `debug` is True
self.token2id = {}
self.id2token = {} # reverse mapping int->set(words)
self.dfs = {} # token_id -> how many documents this token_id appeared in
self.dfs_debug = {} # token_string->how many documents this word appeared in
self.num_docs = 0 # number of documents processed
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
self.allow_update = True
if documents is not None:
self.add_documents(documents)
def __getitem__(self, tokenid):
"""Get all words that have mapped to the given id so far, as a set.
Warnings
--------
Works only if `debug=True`.
Parameters
----------
tokenid : int
Token identifier (result of hashing).
Return
------
set of str
Set of all corresponding words.
"""
return self.id2token.get(tokenid, set())
def restricted_hash(self, token):
"""Calculate id of the given token.
Also keep track of what words were mapped to what ids, for debugging reasons.
Parameters
----------
token : str
Input token.
Return
------
int
Hash value of `token`.
"""
h = self.myhash(utils.to_utf8(token)) % self.id_range
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
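    # Example (sketch): with the default zlib.adler32 hash and id_range=32000, a given
    # token always maps to the same id in [0, id_range), and distinct tokens may collide.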
def __len__(self):
"""Get the number of distinct ids = the entire dictionary size."""
return self.id_range
def keys(self):
"""Get a list of all token ids."""
return range(len(self))
def __str__(self):
return "HashDictionary(%i id range)" % len(self)
@staticmethod
def from_documents(*args, **kwargs):
return HashDictionary(*args, **kwargs)
def add_documents(self, documents):
"""Build dictionary from a collection of documents.
Notes
-----
This is only a convenience wrapper for calling `doc2bow` on each document with `allow_update=True`.
Parameters
----------
documents : iterable of list of str
Collection of documents.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> "sparta" in dct.token2id
False
>>> dct.add_documents([["this","is","sparta"],["just","joking"]]) # add more documents in dictionary
>>> "sparta" in dct.token2id
True
"""
for docno, document in enumerate(documents):
if docno % 10000 == 0:
logger.info("adding document #%i to %s", docno, self)
self.doc2bow(document, allow_update=True) # ignore the result, here we only care about updating token ids
logger.info(
"built %s from %i documents (total %i corpus positions)",
self, self.num_docs, self.num_pos
)
def doc2bow(self, document, allow_update=False, return_missing=False):
"""Convert `document` into the bag-of-words format, like [(1, 4), (150, 1), (2005, 2)].
Notes
-----
Each word is assumed to be a **tokenized and normalized** utf-8 encoded string. No further preprocessing
is done on the words in `document` (apply tokenization, stemming etc) before calling this method.
If `allow_update` or `self.allow_update` is set, then also update dictionary in the process: update overall
corpus statistics and document frequencies. For each id appearing in this document, increase its document
frequency (`self.dfs`) by one.
Parameters
----------
document : list of str
Is a list of tokens = **tokenized and normalized** strings (either utf8 or unicode).
allow_update : bool, optional
If True - update dictionary in the process.
return_missing : bool, optional
Show token_count for missing words. HAVE NO SENSE FOR THIS CLASS, BECAUSE WE USING HASHING-TRICK.
Return
------
list of (int, int)
Document in Bag-of-words (BoW) format.
list of (int, int), dict
If `return_missing=True`, return document in Bag-of-words (BoW) format + empty dictionary.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.doc2bow(["this","is","máma"])
[(1721, 1), (5280, 1), (22493, 1)]
>>> dct.doc2bow(["this","is","máma"], return_missing=True)
([(1721, 1), (5280, 1), (22493, 1)], {})
"""
result = {}
missing = {}
document = sorted(document) # convert the input to plain list (needed below)
for word_norm, group in itertools.groupby(document):
frequency = len(list(group)) # how many times does this word appear in the input document
tokenid = self.restricted_hash(word_norm)
result[tokenid] = result.get(tokenid, 0) + frequency
if self.debug:
# increment document count for each unique token that appeared in the document
self.dfs_debug[word_norm] = self.dfs_debug.get(word_norm, 0) + 1
if allow_update or self.allow_update:
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
# increment document count for each unique tokenid that appeared in the document
# done here, because several words may map to the same tokenid
for tokenid in iterkeys(result):
self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1
# return tokenids, in ascending id order
result = sorted(iteritems(result))
if return_missing:
return result, missing
else:
return result
def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
"""Filter tokens in dictionary by frequency.
Parameters
----------
no_below : int, optional
Keep tokens which are contained in at least `no_below` documents.
no_above : float, optional
Keep tokens which are contained in no more than `no_above` documents
(fraction of total corpus size, not an absolute number).
keep_n : int, optional
Keep only the first `keep_n` most frequent tokens.
Notes
-----
For tokens that appear in:
#. Less than `no_below` documents (absolute number) or \n
#. More than `no_above` documents (fraction of total corpus size, **not absolute number**).
#. After (1) and (2), keep only the first `keep_n` most frequent tokens (or keep all if `None`).
Since :class:`~gensim.corpora.hashdictionary.HashDictionary` id range is fixed and doesn't depend on the number
of tokens seen, this doesn't really "remove" anything.
It only clears some supplementary statistics, for easier debugging and a smaller RAM footprint.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> dct = HashDictionary(corpus)
>>> dct.filter_extremes(no_below=1, no_above=0.5, keep_n=1)
>>> print dct.token2id
{'maso': 15025}
"""
no_above_abs = int(no_above * self.num_docs) # convert fractional threshold to absolute threshold
ok = [item for item in iteritems(self.dfs_debug) if no_below <= item[1] <= no_above_abs]
ok = frozenset(word for word, freq in sorted(ok, key=lambda x: -x[1])[:keep_n])
self.dfs_debug = {word: freq for word, freq in iteritems(self.dfs_debug) if word in ok}
self.token2id = {token: tokenid for token, tokenid in iteritems(self.token2id) if token in self.dfs_debug}
self.id2token = {
tokenid: {token for token in tokens if token in self.dfs_debug}
for tokenid, tokens in iteritems(self.id2token)
}
self.dfs = {tokenid: freq for tokenid, freq in iteritems(self.dfs) if self.id2token.get(tokenid, set())}
# for word->document frequency
logger.info(
"kept statistics for which were in no less than %i and no more than %i (=%.1f%%) documents",
no_below, no_above_abs, 100.0 * no_above
)
def save_as_text(self, fname):
"""Save this HashDictionary to a text file.
Parameters
----------
fname : str
Path to output file.
Notes
-----
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Examples
--------
>>> from gensim.corpora import HashDictionary
>>> from gensim.test.utils import get_tmpfile
>>>
>>> corpus = [["máma", "mele", "maso"], ["ema", "má", "máma"]]
>>> data = HashDictionary(corpus)
>>> data.save_as_text(get_tmpfile("dictionary_in_text_format"))
"""
logger.info("saving HashDictionary mapping to %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = ["%s(%i)" % item for item in sorted(words_df, key=lambda x: -x[1])]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8("%i\t%i\t%s\n" % (tokenid, self.dfs.get(tokenid, 0), words_df)))
| 37.673352 | 120 | 0.604198 | ["MIT"] | Abas-Khan/thesis | gensim/gensim/corpora/hashdictionary.py | 13,162 | Python
from smartva.rules import fires_child as fires
from smartva.data.constants import *
VA = Child
def test_pass():
row = {
VA.BURN: YES,
VA.INJURY_DAYS: 0,
}
assert fires.logic_rule(row) is True
def test_fail_fires():
row = {
VA.BURN: NO,
VA.INJURY_DAYS: 0,
}
assert fires.logic_rule(row) is False
def test_fail_days():
row = {
VA.BURN: YES,
VA.INJURY_DAYS: 31,
}
assert fires.logic_rule(row) is False
def test_fail_no_data():
row = {}
assert fires.logic_rule(row) is False
| 15.157895 | 46 | 0.604167 | ["MIT"] | ihmeuw/SmartVA-Analyze | test/rules/test_fires_child.py | 576 | Python
import configparser
import logging
def dict_url(conf):
"""Add all url from file url.ini with
key = name of the parking end value is
the url.
:returns: dictionnary with all parking and url
:rtype: dict
"""
url = configparser.ConfigParser()
logging.debug("initializing the variable url")
url.read(conf)
logging.debug("read the file")
logging.debug("all url in file %s", list(url["url"]))
res = {}
for simple_url in list(url["url"]):
parking = url["name"][simple_url]
link = url["url"][simple_url]
adress = url["adress"][simple_url]
res[parking] = link, adress
logging.info("this is the dict with keys and urls %s", res)
return res
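# Expected layout of the ini file passed as ``conf`` (hypothetical values):
# [url]
# parking1 = https://example.com/api/parking1
# [name]
# parking1 = Parking One
# [adress]
# parking1 = 1 Example Street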
| 27.846154 | 63 | 0.632597 | ["Unlicense"] | Mancid/data_parking_montpellier | backend/function_park/dict_url.py | 724 | Python
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.sources.tas.api import _load_data, process_csv
def test_load_data():
data = _load_data()
assert len(data) > 100, len(data)
def test_processor():
tp = process_csv(affinity_class_limit=10)
assert tp
assert tp.statements
num_stmts = len(tp.statements)
# This is the total number of statements about human genes
assert num_stmts == 51722, num_stmts
assert all(len(s.evidence) == 1 for s in tp.statements), \
"Some statements lack evidence, or have extra evidence."
| 29.761905 | 72 | 0.7296 | ["BSD-2-Clause"] | PritiShaw/indra | indra/tests/test_tas.py | 625 | Python
from typing import Callable, Iterable, Sequence
import numpy as np
from dpipe.im.axes import AxesLike, AxesParams
from dpipe.itertools import lmap, squeeze_first
from dpipe.im import pad_to_shape
def pad_batch_equal(batch, padding_values: AxesParams = 0, ratio: AxesParams = 0.5):
"""
Pad each element of ``batch`` to obtain a correctly shaped array.
References
----------
`pad_to_shape`
"""
max_shapes = np.max(lmap(np.shape, batch), axis=0)
# if not scalars
if max_shapes.size != 0:
batch = [pad_to_shape(x, max_shapes, padding_values=padding_values, ratio=ratio) for x in batch]
return np.array(batch)
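# Example (sketch): given two 2-D arrays of shapes (3, 4) and (5, 2), both are padded
# to (5, 4) and the returned batch has shape (2, 5, 4).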
def unpack_args(func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and unpacks it while calling ``func``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> def add(x, y):
>>> return x + y
>>>
>>> add_ = unpack_args(add)
>>> add(1, 2) == add_([1, 2])
>>> True
"""
def wrapper(xs, *args_, **kwargs_):
return func(*xs, *args_, *args, **kwargs_, **kwargs)
return wrapper
def multiply(func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and maps ``func`` over it.
Useful when multiple batches require the same function.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
"""
def wrapped(xs: Iterable, *args_, **kwargs_) -> tuple:
return tuple(func(x, *args_, *args, **kwargs_, **kwargs) for x in xs)
return wrapped
def apply_at(index: AxesLike, func: Callable, *args, **kwargs):
"""
Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
Examples
--------
>>> first_sqr = apply_at(0, np.square)
>>> first_sqr([3, 2, 1])
>>> (9, 2, 1)
"""
index = set(np.atleast_1d(index).tolist())
def wrapped(xs: Sequence, *args_, **kwargs_) -> tuple:
index_ = {i + len(xs) if i < 0 else i for i in index}
for idx in index_:
if idx < 0 or idx >= len(xs):
raise IndexError(f'Index {idx} out of bounds.')
return tuple(func(x, *args_, *args, **kwargs_, **kwargs) if i in index_ else x for i, x in enumerate(xs))
return wrapped
def zip_apply(*functions: Callable, **kwargs):
"""
Returns a function that takes an iterable and zips ``functions`` over it.
``kwargs`` are passed to each function as additional arguments.
Examples
--------
>>> zipper = zip_apply(np.square, np.sqrt)
>>> zipper([4, 9])
>>> (16, 3)
"""
def wrapped(xs: Sequence, *args, **kwargs_) -> tuple:
return tuple(func(x, *args, **kwargs_, **kwargs) for func, x in zip(functions, xs))
return wrapped
def random_apply(p: float, func: Callable, *args, **kwargs):
"""
Returns a function that applies ``func`` with a given probability ``p``.
``args`` and ``kwargs`` are passed to ``func`` as additional arguments.
"""
def wrapped(*args_, **kwargs_):
if np.random.binomial(1, p):
return func(*args_, *args, **kwargs_, **kwargs)
return squeeze_first(args_)
return wrapped
def sample_args(func: Callable, *args: Callable, **kwargs: Callable):
"""
Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``.
Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value.
Examples
--------
>>> from scipy.ndimage import rotate
>>>
>>> random_rotate = sample_args(rotate, angle=np.random.normal)
>>> random_rotate(x)
>>> # same as
>>> rotate(x, angle=np.random.normal())
"""
def wrapped(*args_, **kwargs_):
return func(*args_, *([arg() for arg in args]), **kwargs_, **{name: arg() for name, arg in kwargs.items()})
return wrapped
| 28.757143 | 115 | 0.60929 | ["MIT"] | neuro-ml/deep_pipe | dpipe/batch_iter/utils.py | 4,026 | Python
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Click parameter type for AiiDA Plugins."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
import click
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat
from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format
from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups
class PluginParamType(click.ParamType):
"""
AiiDA Plugin name parameter type.
:param group: string or tuple of strings, where each is a valid entry point group. Adding the `aiida.`
prefix is optional. If it is not detected it will be prepended internally.
:param load: when set to True, convert will not return the entry point, but the loaded entry point
Usage::
click.option(... type=PluginParamType(group='aiida.calculations')
or::
click.option(... type=PluginParamType(group=('calculations', 'data'))
"""
name = 'plugin'
def __init__(self, group=None, load=False, *args, **kwargs):
"""
Validate that group is either a string or a tuple of valid entry point groups, or if it
is not specified use the tuple of all recognized entry point groups.
"""
# pylint: disable=keyword-arg-before-vararg
valid_entry_point_groups = get_entry_point_groups()
if group is None:
self._groups = tuple(valid_entry_point_groups)
else:
if isinstance(group, six.string_types):
invalidated_groups = tuple([group])
elif isinstance(group, tuple):
invalidated_groups = group
else:
raise ValueError('invalid type for group')
groups = []
for grp in invalidated_groups:
if not grp.startswith(ENTRY_POINT_GROUP_PREFIX):
grp = ENTRY_POINT_GROUP_PREFIX + grp
if grp not in valid_entry_point_groups:
raise ValueError('entry point group {} is not recognized'.format(grp))
groups.append(grp)
self._groups = tuple(groups)
self._init_entry_points()
self.load = load
super(PluginParamType, self).__init__(*args, **kwargs)
def _init_entry_points(self):
"""
Populate entry point information that will be used later on. This should only be called
once in the constructor after setting self.groups because the groups should not be changed
after instantiation
"""
self._entry_points = [(group, entry_point) for group in self.groups for entry_point in get_entry_points(group)]
self._entry_point_names = [entry_point.name for group in self.groups for entry_point in get_entry_points(group)]
@property
def groups(self):
return self._groups
@property
def has_potential_ambiguity(self):
"""
Returns whether the set of supported entry point groups can lead to ambiguity when only an entry point name
is specified. This will happen if one ore more groups share an entry point with a common name
"""
return len(self._entry_point_names) != len(set(self._entry_point_names))
def get_valid_arguments(self):
"""
Return a list of all available plugins for the groups configured for this PluginParamType instance.
If the entry point names are not unique, because there are multiple groups that contain an entry
point that has an identical name, we need to prefix the names with the full group name
:returns: list of valid entry point strings
"""
if self.has_potential_ambiguity:
fmt = EntryPointFormat.FULL
return sorted([format_entry_point_string(group, ep.name, fmt=fmt) for group, ep in self._entry_points])
return sorted(self._entry_point_names)
def get_possibilities(self, incomplete=''):
"""
Return a list of plugins starting with incomplete
"""
if incomplete == '':
return self.get_valid_arguments()
# If there is a chance of ambiguity we always return the entry point string in FULL format, otherwise
# return the possibilities in the same format as the incomplete. Note that this may have some unexpected
# effects. For example if incomplete equals `aiida.` or `calculations` it will be detected as the MINIMAL
# format, even though they would also be the valid beginnings of a FULL or PARTIAL format, except that we
# cannot know that for sure at this time
if self.has_potential_ambiguity:
possibilites = [eps for eps in self.get_valid_arguments() if eps.startswith(incomplete)]
else:
possibilites = []
fmt = get_entry_point_string_format(incomplete)
for group, entry_point in self._entry_points:
entry_point_string = format_entry_point_string(group, entry_point.name, fmt=fmt)
if entry_point_string.startswith(incomplete):
possibilites.append(entry_point_string)
return possibilites
def complete(self, ctx, incomplete): # pylint: disable=unused-argument
"""
Return possible completions based on an incomplete value
:returns: list of tuples of valid entry points (matching incomplete) and a description
"""
return [(p, '') for p in self.get_possibilities(incomplete=incomplete)]
def get_missing_message(self, param):
return 'Possible arguments are:\n\n' + '\n'.join(self.get_valid_arguments())
def get_entry_point_from_string(self, entry_point_string):
"""
Validate a given entry point string, which means that it should have a valid entry point string format
and that the entry point unambiguously corresponds to an entry point in the groups configured for this
instance of PluginParameterType.
:returns: the entry point if valid
:raises: ValueError if the entry point string is invalid
"""
group = None
name = None
entry_point_format = get_entry_point_string_format(entry_point_string)
if entry_point_format in (EntryPointFormat.FULL, EntryPointFormat.PARTIAL):
group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR)
if entry_point_format == EntryPointFormat.PARTIAL:
group = ENTRY_POINT_GROUP_PREFIX + group
if group not in self.groups:
                    raise ValueError('entry point group {} is not supported by this parameter'.format(group))
elif entry_point_format == EntryPointFormat.MINIMAL:
name = entry_point_string
matching_groups = [group for group, entry_point in self._entry_points if entry_point.name == name]
if len(matching_groups) > 1:
raise ValueError("entry point '{}' matches more than one valid entry point group [{}], "
"please specify an explicit group prefix".format(name, ' '.join(matching_groups)))
elif not matching_groups:
raise ValueError("entry point '{}' is not valid for any of the allowed "
"entry point groups: {}".format(name, ' '.join(self.groups)))
else:
group = matching_groups[0]
else:
            raise ValueError('invalid entry point string format: {}'.format(entry_point_string))
try:
entry_point = get_entry_point(group, name)
except exceptions.EntryPointError as exception:
raise ValueError(exception)
return entry_point
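    # Example (sketch): with groups=('aiida.calculations',), both the MINIMAL form
    # 'arithmetic.add' and the FULL form 'aiida.calculations:arithmetic.add' resolve to
    # the same entry point, assuming such an entry point is registered.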
@decorators.with_dbenv()
def convert(self, value, param, ctx):
"""
Convert the string value to an entry point instance, if the value can be successfully parsed
into an actual entry point. Will raise click.BadParameter if validation fails.
"""
if not value:
raise click.BadParameter('plugin name cannot be empty')
try:
entry_point = self.get_entry_point_from_string(value)
except ValueError as exception:
raise click.BadParameter(str(exception))
if self.load:
try:
return entry_point.load()
except exceptions.LoadingEntryPointError as exception:
raise click.BadParameter(str(exception))
else:
return entry_point
| 42.0181 | 120 | 0.643011 | ["BSD-2-Clause"] | DanielMarchand/aiida_core | aiida/cmdline/params/types/plugin.py | 9,286 | Python
from BoundingBox import *
from eval_utils import *
class BoundingBoxes:
def __init__(self):
self._boundingBoxes = []
def addBoundingBox(self, bb):
self._boundingBoxes.append(bb)
def removeBoundingBox(self, _boundingBox):
for d in self._boundingBoxes:
if BoundingBox.compare(d, _boundingBox):
                self._boundingBoxes.remove(d)
return
def removeAllBoundingBoxes(self):
self._boundingBoxes = []
def getBoundingBoxes(self):
return self._boundingBoxes
def getBoundingBoxByClass(self, classId):
boundingBoxes = []
for d in self._boundingBoxes:
if d.getClassId() == classId: # get only specified bounding box type
boundingBoxes.append(d)
return boundingBoxes
def getClasses(self):
classes = []
for d in self._boundingBoxes:
c = d.getClassId()
if c not in classes:
classes.append(c)
return classes
def getBoundingBoxesByType(self, bbType):
# get only specified bb type
return [d for d in self._boundingBoxes if d.getBBType() == bbType]
def getBoundingBoxesByImageName(self, imageName):
# get only specified bb type
return [d for d in self._boundingBoxes if d.getImageName() == imageName]
def count(self, bbType=None):
if bbType is None: # Return all bounding boxes
return len(self._boundingBoxes)
count = 0
for d in self._boundingBoxes:
if d.getBBType() == bbType: # get only specified bb type
count += 1
return count
def clone(self):
newBoundingBoxes = BoundingBoxes()
for d in self._boundingBoxes:
det = BoundingBox.clone(d)
newBoundingBoxes.addBoundingBox(det)
return newBoundingBoxes
def drawAllBoundingBoxes(self, image, imageName):
bbxes = self.getBoundingBoxesByImageName(imageName)
for bb in bbxes:
if bb.getBBType() == BBType.GroundTruth: # if ground truth
image = add_bb_into_image(image, bb, color=(0, 255, 0)) # green
else: # if detection
image = add_bb_into_image(image, bb, color=(255, 0, 0)) # red
return image
# def drawAllBoundingBoxes(self, image):
# for gt in self.getBoundingBoxesByType(BBType.GroundTruth):
# image = add_bb_into_image(image, gt ,color=(0,255,0))
# for det in self.getBoundingBoxesByType(BBType.Detected):
# image = add_bb_into_image(image, det ,color=(255,0,0))
# return image
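# Example usage (sketch; BoundingBox construction arguments depend on BoundingBox.py):
#   boxes = BoundingBoxes()
#   boxes.addBoundingBox(ground_truth_box)
#   boxes.addBoundingBox(detected_box)
#   car_boxes = boxes.getBoundingBoxByClass('car')
#   num_detections = boxes.count(BBType.Detected)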
| 34.012821 | 81 | 0.614776 | ["Apache-2.0"] | videetparekh/model-zoo-models | ssd_mobilenetv2/BoundingBoxes.py | 2,653 | Python
#!/usr/bin/env python
#============================================================================
# Copyright (C) Microsoft Corporation, All rights reserved.
#============================================================================
import os
import imp
import re
import codecs
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# backwards compatibility with pre-multi-homing bundles
conf_path = '/etc/opt/microsoft/omsagent/conf/omsagent.conf'
omi_map_path = '/etc/opt/microsoft/omsagent/conf/omsagent.d/omi_mapping.json'
omi_map = None
multi_homed = None
non_mh_heartbeat_cmd = '/opt/microsoft/omsagent/bin/omsadmin.sh -b'
oms_restart_cmd = 'sudo /opt/microsoft/omsagent/bin/service_control restart'
def init_paths(WorkspaceID):
global conf_path
global omi_map_path
global multi_homed
omsagent_dir = '/etc/opt/microsoft/omsagent/'
mh_conf_dir = omsagent_dir + WorkspaceID + '/conf'
multi_homed = os.path.isdir(mh_conf_dir)
if multi_homed:
LG().Log('INFO', 'OMSAgent is multi-homed and resource is updating workspace ' + WorkspaceID)
conf_path = mh_conf_dir + '/omsagent.conf'
omi_map_path = mh_conf_dir + '/omsagent.d/omi_mapping.json'
def init_omi_map():
global omi_map
txt = codecs.open(omi_map_path, 'r', 'utf8').read()
omi_map = eval(txt)
def init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_paths(WorkspaceID)
init_omi_map()
if WorkspaceID is not None:
WorkspaceID = WorkspaceID.encode('ascii', 'ignore')
else:
WorkspaceID = ''
if PerfCounterObject is not None:
for perf in PerfCounterObject:
new_perfs = []
if len(perf['PerformanceCounter'].value):
for perf_counter in perf['PerformanceCounter'].value:
new_perfs.append(perf_counter.encode('ascii', 'ignore'))
perf['PerformanceCounter'] = new_perfs
if perf['InstanceName'].value is None:
perf['InstanceName'] = ''
else:
perf['InstanceName'] = perf[
'InstanceName'].value.encode('ascii', 'ignore')
if perf['ObjectName'].value is None:
perf['ObjectName'] = ''
else:
perf['ObjectName'] = perf[
'ObjectName'].value.encode('ascii', 'ignore')
if perf['AllInstances'].value is None:
perf['AllInstances'] = False
else:
if perf['AllInstances'].value.value == 1:
perf['AllInstances'] = True
else:
perf['AllInstances'] = False
perf['IntervalSeconds'] = perf['IntervalSeconds'].value.value
def Set_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
def Test_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Test(HeartbeatIntervalSeconds, PerfCounterObject)
def Get_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
arg_names = list(locals().keys())
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
retval = 0
NewHeartbeatIntervalSeconds, NewPerf = Get(
HeartbeatIntervalSeconds, PerfCounterObject)
for perf in NewPerf:
if len(perf['PerformanceCounter']):
perf['PerformanceCounter'] = protocol.MI_StringA(
perf['PerformanceCounter'])
perf['ObjectName'] = protocol.MI_String(perf['ObjectName'])
perf['InstanceName'] = protocol.MI_String(perf['InstanceName'])
perf['AllInstances'] = protocol.MI_Boolean(perf['AllInstances'])
perf['IntervalSeconds'] = protocol.MI_Uint16(perf['IntervalSeconds'])
PerfCounterObject = protocol.MI_InstanceA(NewPerf)
HeartbeatIntervalSeconds = protocol.MI_Uint16(NewHeartbeatIntervalSeconds)
WorkspaceID = protocol.MI_String(WorkspaceID)
Name = protocol.MI_String(Name)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
if Test(HeartbeatIntervalSeconds, PerfCounterObject) == [0]:
return [0]
if UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
return [0]
else:
return [-1]
def Test(HeartbeatIntervalSeconds, PerfCounterObject):
prune_perfs(PerfCounterObject)
NewHeartbeatIntervalSeconds, NewPerfs = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
if NewHeartbeatIntervalSeconds != HeartbeatIntervalSeconds:
return [-1]
PerfCounterObject.sort()
for perf in PerfCounterObject:
perf['PerformanceCounter'].sort()
perf['AllInstances'] = True
NewPerfs.sort()
for perf in NewPerfs:
perf['PerformanceCounter'].sort()
if PerfCounterObject != NewPerfs:
return [-1]
return [0]
def Get(HeartbeatIntervalSeconds, PerfCounterObject):
NewHeartbeatIntervalSeconds, NewPerf = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
return NewHeartbeatIntervalSeconds, NewPerf
def TranslatePerfs(object_name, perfs):
d = {}
for p in perfs:
for cname in omi_map:
for prop in cname['CimProperties']:
if (p == prop['CounterName'] or p == prop['CimPropertyName']) and cname['ObjectName'] == object_name:
if cname['ObjectName'] not in d.keys():
d[cname['ObjectName']] = [p]
else:
d[cname['ObjectName']].append(p)
return d
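# Example (sketch, assuming omi_mapping.json has a 'Processor' entry whose CimProperties
# include a '% Processor Time' counter): TranslatePerfs('Processor', ['% Processor Time'])
# returns {'Processor': ['% Processor Time']}.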
def ReadOMSAgentConf(HeartbeatIntervalSeconds, PerfCounterObject):
txt = ''
try:
txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
'ascii', 'ignore')
LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
except:
LG().Log(
'ERROR', 'Unable to read omsagent configuration ' + conf_path + '.')
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?run_interval ([0-9]+[a-z])\n</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
m = heartbeat_srch.search(txt)
if m is not None:
interval = int(m.group(1)[:-1])
if m.group(1)[-1:] == 'm':
interval *= 60
else:
interval = None
new_heartbeat = interval
perf_src_srch_str = r'\n<source>\n type oms_omi.*?object_name "(.*?)".*?instance_regex "(.*?)".*?counter_name_regex "(.*?)".*?interval ([0-9]+[a-z]).*?</source>\n'
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
new_perfobj = []
sources = perf_src_srch.findall(txt)
inst = ''
interval = 0
for source in sources:
s_perf = []
if len(source[2]):
s_perf = source[2].strip('(').strip(')').split('|')
object_name = source[0]
interval = int(source[3][:-1])
if source[3][-1:] == 'm':
interval *= 60
inst = source[1]
inst = inst.replace('.*', '*')
new_perfobj.append({'PerformanceCounter': s_perf, 'InstanceName': inst,
'IntervalSeconds': interval, 'AllInstances': True, 'ObjectName': object_name})
return new_heartbeat, new_perfobj
def UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
if os.path.exists(conf_path):
txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
'ascii', 'ignore')
LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
else:
LG().Log(
'INFO', 'No omsagent configuration file present. Will create new configuration file at ' + conf_path + '.')
txt = ''
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
heartbeat_cmd = non_mh_heartbeat_cmd
if multi_homed:
heartbeat_cmd = 'echo'
heartbeat_src = '<source>\n type exec\n tag heartbeat.output\n command ' + heartbeat_cmd + ' > /dev/null\n format tsv\n keys severity,message\n run_interval ' + \
str(HeartbeatIntervalSeconds) + 's\n</source>\n'
txt = heartbeat_srch.sub(heartbeat_src, txt)
d = {}
perf_src_srch_str = r'\n<source>\n type oms_omi.*?</source>\n'
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
for source in perf_src_srch.findall(txt):
txt = txt.replace(source, '')
new_source = ''
for perf in PerfCounterObject:
d = TranslatePerfs(perf['ObjectName'], perf['PerformanceCounter'])
for k in d.keys():
names = '(' + reduce(lambda x, y: x + '|' + y, d[k]) + ')'
instances = re.sub(r'([><]|>|<)', '', perf['InstanceName'])
instances = re.sub(r'([*])', '.*', instances)
new_source += '\n<source>\n type oms_omi\n object_name "' + k + '"\n instance_regex "' + instances + \
'"\n counter_name_regex "' + names + '"\n interval ' + \
str(perf['IntervalSeconds']) + 's\n</source>\n'
m = heartbeat_srch.search(txt)
if m is not None:
i = m.end(0) + 1
txt = txt[:i] + new_source + txt[i:]
else:
txt = new_source
try:
codecs.open(conf_path, 'w', 'utf8').write(txt)
LG().Log(
'INFO', 'Created omsagent configuration at ' + conf_path + '.')
except:
LG().Log(
'ERROR', 'Unable to create omsagent configuration at ' + conf_path + '.')
return False
    global oms_restart_cmd
    restart_cmd = oms_restart_cmd
    process_to_restart = 'omsagent'
    if multi_homed:
        restart_cmd += ' ' + WorkspaceID
process_to_restart += '-' + WorkspaceID
if os.system(restart_cmd) == 0:
LG().Log('INFO', 'Successfully restarted ' + process_to_restart + '.')
else:
LG().Log('ERROR', 'Error restarting ' + process_to_restart + '.')
return False
return True
def rm_unicode(obj):
if isinstance(obj, dict):
d = {}
for k, v in obj.iteritems():
d[rm_unicode(k)] = rm_unicode(v)
return d
elif isinstance(obj, list):
return [rm_unicode(i) for i in obj]
elif isinstance(obj, unicode):
return obj.encode('ascii', 'ignore')
else:
return obj
def prune_perfs(PerfCounterObject):
l = len(PerfCounterObject)
i = 0
while i < l:
d = TranslatePerfs(PerfCounterObject[i]['ObjectName'], PerfCounterObject[i]['PerformanceCounter'])
if PerfCounterObject[i]['ObjectName'] in d.keys():
for p in PerfCounterObject[i]['PerformanceCounter']:
if p not in d[PerfCounterObject[i]['ObjectName']]:
LG().Log('INFO', 'No match for PerformanceCounter \'' \
+ p + '\' in ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject[i]['PerformanceCounter'].remove(p)
if len(PerfCounterObject[i]['PerformanceCounter']) == 0:
PerfCounterObject.pop(i)
l -= 1
i -= 1
else:
LG().Log('INFO', 'No matches for ObjectName ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' and PerformanceCounter ' \
+ repr(PerfCounterObject[i]['PerformanceCounter']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject.pop(i)
l -= 1
i -= 1
i += 1
| 39.476667 | 172 | 0.606772 | ["MIT"] | MicrosoftDocs/PowerShell-DSC-for-Linux | Providers/Scripts/2.4x-2.5x/Scripts/nxOMSPerfCounter.py | 11,843 | Python
"""Parses the arguments passed to the bash script and returns them back to the bash script."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import sys
# Technique for printing custom error and help
# Source: https://stackoverflow.com/a/4042861/862857
class CustomParser(argparse.ArgumentParser):
def error(self, message):
print('{}: error: {}'.format(self.prog, message), file=sys.stderr)
self.print_help()
sys.exit(1)
parser = CustomParser(prog='create_binauthz_attestation')
# By default, arguments with "--" are optional, so we have
# to make our own argument group so they are required
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument(
'--artifact-url',
type=str,
help='Registry URL for container image',
required=True)
attestor_args = parser.add_argument_group('Attestor arguments')
attestor_args.add_argument(
'--attestor',
type=str,
help='Fully qualified attestor name or just the attestor name',
required=True)
attestor_args.add_argument(
'--attestor-project',
type=str,
help='The project that the attestor is a part of')
pgp_args = parser.add_argument_group('PGP key arguments')
pgp_args.add_argument(
'--pgp-key-fingerprint',
type=str,
help='The fingerprint of the PGP key you plan to use')
# If the user is using KMS, they should provide:
kms_args = parser.add_argument_group('KMS key arguments')
kms_args.add_argument(
'--keyversion',
type=str,
help='The fully qualified keyversion or the version number of the KMS key')
kms_args.add_argument(
'--keyversion-key', type=str, help='The name of the KMS key')
kms_args.add_argument(
'--keyversion-keyring', type=str, help='The keyring for the KMS key')
kms_args.add_argument(
'--keyversion-location', type=str, help='The location of the KMS key')
kms_args.add_argument(
'--keyversion-project',
type=str,
help='The project that the KMS key belongs to')
args = parser.parse_args()
# Validate and parse attestor resource flags.
if '/' not in args.attestor:
if not args.attestor_project:
parser.error('The --attestor-project option is required if '
'--attestor is not a fully qualified '
'Attestor resource identifier')
else:
args.attestor = 'projects/{project}/attestors/{attestor}'.format(
project=args.attestor_project, attestor=args.attestor)
attestor_regex = re.compile(r'^projects/[a-z0-9-]*/attestors/[a-zA-Z0-9-_]*$')
if not attestor_regex.search(args.attestor):
parser.error('Attestor "{attestor}" is not '
'a valid attestor name'.format(attestor=args.attestor))
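# A fully qualified attestor identifier looks like (hypothetical names):
#   projects/my-project/attestors/my-attestor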
# Enforce mutual exclusion of key flag types.
keyversion_args = [
args.keyversion, args.keyversion_key, args.keyversion_keyring,
args.keyversion_location, args.keyversion_project
]
if args.pgp_key_fingerprint and any(keyversion_args):
parser.error('You cannot set --pgp-key-fingerprint and --keyversion related'
' options at the same time.')
if not args.pgp_key_fingerprint and not any(keyversion_args):
parser.error('Either --pgp-key-fingerprint or --keyversion related'
' options must be set.')
# Validate and parse keyversion resource flags.
if args.keyversion is not None and '/' not in args.keyversion:
if not all(keyversion_args):
parser.error(
'The --keyversion-key, --keyversion-keyring, --keyversion-location, '
'and --keyversion-project options are required if --keyversion '
'is not a fully qualified KMS key resource identifier.')
else:
args.keyversion = (
'projects/{project}/locations/{location}/keyRings/{keyRing}/'
'cryptoKeys/{cryptoKey}/cryptoKeyVersions/{keyversion}').format(
project=args.keyversion_project,
location=args.keyversion_location,
keyRing=args.keyversion_keyring,
cryptoKey=args.keyversion_key,
keyversion=args.keyversion)
keyversion_regex = re.compile(r'^projects/[a-z0-9-]*/locations/[a-z0-9-]*'
r'/keyRings/[a-zA-Z0-9-_]*/cryptoKeys/'
r'[a-zA-Z0-9-_]*/cryptoKeyVersions/[1-9][0-9]*$')
if args.keyversion is not None and not keyversion_regex.search(args.keyversion):
parser.error('"{}" is not a valid fully qualified KMS key identifier.'.format(
args.keyversion))
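# A fully qualified KMS keyversion identifier looks like (hypothetical names):
#   projects/my-project/locations/global/keyRings/my-keyring/cryptoKeys/my-key/cryptoKeyVersions/1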
arguments_list = []
for arg_name, value in args.__dict__.items():
arguments_list.append('[{name}]="{value}"'.format(
name=arg_name, value=value or ''))
print('\n'.join(arguments_list))
| 36.905512 | 94 | 0.703008 | ["Apache-2.0"] | 2733284198/cloud-builders-community | binauthz-attestation/parse_arguments.py | 4,687 | Python
from stack import Stack as s
class Queue:
def __init__(self, iter=[]):
self.stack_one = s()
self.stack_two = s()
self._len = 0
for item in iter:
self.enqueue(item)
def enqueue(self, value):
if value:
self.stack_one.push(value)
self._len += 1
return self.stack_one
return False
def dequeue(self):
if self._len == 0:
return False
else:
            # Move all but the oldest item onto the second stack so the oldest
            # (front of the queue) can be popped from the first stack.
            for _ in range(self._len - 1):
                self.stack_two.push(self.stack_one.pop())
            last = self.stack_one.pop()
            for _ in range(self._len - 1):
                self.stack_one.push(self.stack_two.pop())
            self._len -= 1
            return last
| 25.866667 | 57 | 0.501289 | ["MIT"] | bhold6160/data-structures-and-algorithms | queue-with-stacks/queue_with_stacks.py | 776 | Python
number = 5
number2 = 'five'
print(number)
breakpoint()
print(str(number) + " " + number2)
| 10.333333 | 34 | 0.645161 | ["MIT"] | PacktPublishing/Applied-Computational-Thinking-with-Python | Chapter07/ch7_debugger3.py | 93 | Python
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
import typing
from . import abstract
from . import base_con
from . import enums
from . import errors
from . import options
from .datatypes import datatypes
from .protocol import protocol
__all__ = ('Transaction', 'AsyncIOTransaction')
class TransactionState(enum.Enum):
NEW = 0
STARTED = 1
COMMITTED = 2
ROLLEDBACK = 3
FAILED = 4
class BaseTransaction:
__slots__ = (
'_connection',
'_connection_inner',
'_connection_impl',
'_pool',
'_options',
'_state',
'_managed',
)
def __init__(self, owner, options: options.TransactionOptions):
if isinstance(owner, base_con.BaseConnection):
self._connection = owner
self._connection_inner = owner._inner
self._pool = None
else:
self._connection = None
self._connection_inner = None
self._pool = owner
self._connection_impl = None
self._options = options
self._state = TransactionState.NEW
self._managed = False
def is_active(self) -> bool:
return self._state is TransactionState.STARTED
def __check_state_base(self, opname):
if self._state is TransactionState.COMMITTED:
raise errors.InterfaceError(
'cannot {}; the transaction is already committed'.format(
opname))
if self._state is TransactionState.ROLLEDBACK:
raise errors.InterfaceError(
'cannot {}; the transaction is already rolled back'.format(
opname))
if self._state is TransactionState.FAILED:
raise errors.InterfaceError(
'cannot {}; the transaction is in error state'.format(
opname))
def __check_state(self, opname):
if self._state is not TransactionState.STARTED:
if self._state is TransactionState.NEW:
raise errors.InterfaceError(
'cannot {}; the transaction is not yet started'.format(
opname))
self.__check_state_base(opname)
def _make_start_query(self):
self.__check_state_base('start')
if self._state is TransactionState.STARTED:
raise errors.InterfaceError(
'cannot start; the transaction is already started')
return self._options.start_transaction_query()
def _make_commit_query(self):
self.__check_state('commit')
return 'COMMIT;'
def _make_rollback_query(self):
self.__check_state('rollback')
return 'ROLLBACK;'
def _borrow(self):
inner = self._connection_inner
if inner._borrowed_for:
raise base_con.borrow_error(inner._borrowed_for)
inner._borrowed_for = base_con.BorrowReason.TRANSACTION
def _maybe_return(self):
if self._connection_inner is not None:
self._connection_inner._borrowed_for = None
def __repr__(self):
attrs = []
attrs.append('state:{}'.format(self._state.name.lower()))
attrs.append(repr(self._options))
if self.__class__.__module__.startswith('edgedb.'):
mod = 'edgedb'
else:
mod = self.__class__.__module__
return '<{}.{} {} {:#x}>'.format(
mod, self.__class__.__name__, ' '.join(attrs), id(self))
class BaseAsyncIOTransaction(BaseTransaction, abstract.AsyncIOExecutor):
__slots__ = ()
async def _start(self, single_connect=False) -> None:
query = self._make_start_query()
if self._pool is not None:
self._connection = await self._pool._acquire()
self._connection_inner = self._connection._inner
inner = self._connection_inner
if not inner._impl or inner._impl.is_closed():
await self._connection._reconnect(single_attempt=single_connect)
self._connection_impl = self._connection._inner._impl
try:
await self._connection_impl.privileged_execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED
async def _commit(self):
try:
query = self._make_commit_query()
try:
await self._connection_impl.privileged_execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.COMMITTED
finally:
self._maybe_return()
if self._pool is not None:
await self._pool._release(self._connection)
async def _rollback(self):
try:
query = self._make_rollback_query()
try:
await self._connection_impl.privileged_execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.ROLLEDBACK
finally:
self._maybe_return()
if self._pool is not None:
await self._pool._release(self._connection)
async def _ensure_transaction(self):
pass
async def query(self, query: str, *args, **kwargs) -> datatypes.Set:
await self._ensure_transaction()
con = self._connection_inner
result, _ = await self._connection_impl._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
io_format=protocol.IoFormat.BINARY,
)
return result
async def query_single(self, query: str, *args, **kwargs) -> typing.Any:
await self._ensure_transaction()
con = self._connection_inner
result, _ = await self._connection_impl._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
expect_one=True,
io_format=protocol.IoFormat.BINARY,
)
return result
async def query_json(self, query: str, *args, **kwargs) -> str:
await self._ensure_transaction()
con = self._connection_inner
result, _ = await self._connection_impl._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
io_format=protocol.IoFormat.JSON,
)
return result
async def query_single_json(self, query: str, *args, **kwargs) -> str:
await self._ensure_transaction()
con = self._connection_inner
result, _ = await self._connection_impl._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
expect_one=True,
io_format=protocol.IoFormat.JSON,
)
return result
async def execute(self, query: str) -> None:
"""Execute an EdgeQL command (or commands).
Example:
.. code-block:: pycon
>>> await con.execute('''
... CREATE TYPE MyType { CREATE PROPERTY a -> int64 };
... FOR x IN {100, 200, 300} UNION INSERT MyType { a := x };
... ''')
"""
await self._ensure_transaction()
await self._connection_impl._protocol.simple_query(
query, enums.Capability.EXECUTE)
class AsyncIOTransaction(BaseAsyncIOTransaction):
__slots__ = ()
async def __aenter__(self):
if self._managed:
raise errors.InterfaceError(
'cannot enter context: already in an `async with` block')
self._managed = True
await self.start()
return self
async def __aexit__(self, extype, ex, tb):
try:
if extype is not None:
await self._rollback()
else:
await self._commit()
finally:
self._managed = False
async def start(self) -> None:
"""Enter the transaction or savepoint block."""
await self._start()
self._borrow()
async def commit(self) -> None:
"""Exit the transaction or savepoint block and commit changes."""
if self._managed:
raise errors.InterfaceError(
'cannot manually commit from within an `async with` block')
await self._commit()
async def rollback(self) -> None:
"""Exit the transaction or savepoint block and rollback changes."""
if self._managed:
raise errors.InterfaceError(
'cannot manually rollback from within an `async with` block')
await self._rollback()
class BaseBlockingIOTransaction(BaseTransaction, abstract.Executor):
__slots__ = ()
def _start(self, single_connect=False) -> None:
query = self._make_start_query()
# no pools supported for blocking con
inner = self._connection_inner
if not inner._impl or inner._impl.is_closed():
self._connection._reconnect(single_attempt=single_connect)
self._connection_inner = self._connection._inner
self._connection_impl = self._connection_inner._impl
try:
self._connection_impl.privileged_execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED
def _commit(self):
try:
query = self._make_commit_query()
try:
self._connection_impl.privileged_execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.COMMITTED
finally:
self._maybe_return()
def _rollback(self):
try:
query = self._make_rollback_query()
try:
self._connection_impl.privileged_execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.ROLLEDBACK
finally:
self._maybe_return()
def _ensure_transaction(self):
pass
def query(self, query: str, *args, **kwargs) -> datatypes.Set:
self._ensure_transaction()
con = self._connection_inner
return self._connection_impl._protocol.sync_execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
io_format=protocol.IoFormat.BINARY,
)
def query_single(self, query: str, *args, **kwargs) -> typing.Any:
self._ensure_transaction()
con = self._connection_inner
return self._connection_impl._protocol.sync_execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
expect_one=True,
io_format=protocol.IoFormat.BINARY,
)
def query_json(self, query: str, *args, **kwargs) -> str:
self._ensure_transaction()
con = self._connection_inner
return self._connection_impl._protocol.sync_execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
io_format=protocol.IoFormat.JSON,
)
def query_single_json(self, query: str, *args, **kwargs) -> str:
self._ensure_transaction()
con = self._connection_inner
return self._connection_impl._protocol.sync_execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=con._codecs_registry,
qc=con._query_cache,
expect_one=True,
io_format=protocol.IoFormat.JSON,
)
def execute(self, query: str) -> None:
self._ensure_transaction()
self._connection_impl._protocol.sync_simple_query(
query, enums.Capability.EXECUTE)
class Transaction(BaseBlockingIOTransaction):
def __enter__(self):
if self._managed:
raise errors.InterfaceError(
'cannot enter context: already in a `with` block')
self._managed = True
self.start()
return self
def __exit__(self, extype, ex, tb):
try:
if extype is not None:
self._rollback()
else:
self._commit()
finally:
self._managed = False
def start(self) -> None:
"""Enter the transaction or savepoint block."""
self._start()
self._borrow()
def commit(self) -> None:
"""Exit the transaction or savepoint block and commit changes."""
if self._managed:
raise errors.InterfaceError(
'cannot manually commit from within a `with` block')
self._commit()
def rollback(self) -> None:
"""Exit the transaction or savepoint block and rollback changes."""
if self._managed:
raise errors.InterfaceError(
'cannot manually rollback from within a `with` block')
self._rollback()
| 32.696347 | 77 | 0.601215 | [
"Apache-2.0"
] | edgedb/edgedb-python | edgedb/transaction.py | 14,321 | Python |
import os
import datetime
import logging
import sqlite3
import pytest
from utils import setup_mdb_dir, all_book_info, load_db_from_sql_file, TESTS_DIR
from manga_db.manga_db import MangaDB
from manga_db.manga import Book
from manga_db.ext_info import ExternalInfo
from manga_db.constants import LANG_IDS
@pytest.mark.parametrize("title_eng, title_foreign, expected", [
("English", "Foreign", "English / Foreign"),
("English", None, "English"),
(None, "Foreign", "Foreign")])
def test_build_title(title_eng, title_foreign, expected):
assert Book.build_title(title_eng, title_foreign) == expected
def test_fetch_extinfo(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=16)
assert b.ext_infos == []
db_con = memdb
ei_rows_man = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows_man[0])
ei2 = ExternalInfo(mdb, b, **ei_rows_man[1])
assert b._fetch_external_infos() == [ei1, ei2]
def test_fetch_assoc_col(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=14)
tags = ["Ahegao", "Anal", "Collar", "Large Breasts", "Maid", "Mind Break",
"Mind Control", "Nakadashi", "Office Lady", "Pantyhose", "Rape", "Stockings",
"X-ray"]
assert sorted(b._fetch_associated_column("tag")) == sorted(tags)
assert b._fetch_associated_column("character") == []
assert b._fetch_associated_column("artist") == ["Fan no Hitori"]
def test_upd_assoc_col(monkeypatch, setup_mdb_dir):
# update_assoc_columns/get_assoc_cols
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
# pass last_change kwarg so it doesnt get auto set and counts as change
b = Book(mdb, in_db=False, id=12, last_change=datetime.date.today())
ei_row = db_con.execute("SELECT * FROM ExternalInfo WHERE id = 12").fetchone()
ei = ExternalInfo(mdb, b, **ei_row)
tags = ("Anal;Femdom;Large Breasts;Nakadashi;Straight Shota;Big Ass;Short Hair;Hat"
";Royalty;Dark Skin;Huge Penis;Big Areola;Defloration;Double Penetration;"
"Elder Sister;Tall Girl".split(";"))
artists = ["Kaneda Asou"]
category = ["Doujinshi"]
groups = ["Dokumushi Shokeitai"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == groups
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == []
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == []
assert assoc_cols["ext_infos"] == [ei]
# upd
# changes
b.tag = ["delchange1", "delchange"]
b.category = ["testcat"]
b.update_assoc_columns_from_db()
# changes should be reset
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == groups
assert b.list == lists
assert b.character == []
assert b.collection == []
assert b.parody == []
assert b.ext_infos == [ei]
b = Book(mdb, in_db=False, id=16, last_change=datetime.date.today())
ei_rows = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows[0])
ei2 = ExternalInfo(mdb, b, **ei_rows[1])
tags = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;Layer Cake;Selfcest".split(";"))
artists = ["bariun"]
category = ["Doujinshi"]
characters = ["Akira Kurusu", "Futaba Sakura"]
parodies = ["Persona 5 / ペルソナ5"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == []
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == characters
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == parodies
assert assoc_cols["ext_infos"] == [ei1, ei2]
# upd
# changes
b.groups = ["delchange1", "delchange"]
b.artist = ["tartist"]
b.update_assoc_columns_from_db()
# changes should be reset
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == []
assert b.list == lists
assert b.character == characters
assert b.collection == []
assert b.parody == parodies
assert b.ext_infos == [ei1, ei2]
def test_diff(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
# not testing change_str
b1_data = dict(
id=None,
title_eng="Same",
title_foreign="Different1",
language_id=1,
pages=25,
status_id=1,
my_rating=4.3,
category=["Manga"],
collection=["Diff collection1"],
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=0
)
b1 = Book(mdb, **b1_data)
b2_data = dict(
id=None,
title_eng="Same",
title_foreign="Different2",
language_id=1,
pages=27,
status_id=1,
my_rating=None,
category=["Manga"],
collection=["Diff collection2"],
groups=["Artistgroup"],
artist=["Diff", "Diff2", "Diff3"],
parody=["Blabla"],
character=["Char1", "Char5", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 4, 3),
note=None,
favorite=1
)
b2 = Book(mdb, **b2_data)
changes, change_str = b1.diff(b2)
changes_expected = dict(
title_foreign="Different2",
pages=27,
my_rating=None,
# added removed
collection=({"Diff collection2"}, {"Diff collection1"}),
artist=({"Diff", "Diff3"}, {"Diff1"}),
character=({"Char5"}, {"Char2"}),
last_change=datetime.date(2018, 4, 3),
favorite=1
)
assert changes == changes_expected
def test_add_rem_assoc(monkeypatch, setup_mdb_dir):
# _add/_remove assoc col
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
b = mdb.get_book(5)
tag_before = b.tag.copy()
tag_change = ["Test1", "Test2", "Blabla"]
# _add_associated_column_values doesnt commit
with mdb.db_con:
b._add_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
with mdb.db_con:
b._remove_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";") == tag_before
def test_static_db_methods(monkeypatch, setup_mdb_dir):
# static db methods
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
tag_before = "Large Breasts;Nakadashi;Blowjob;Threesome;Bikini;Group Sex;Swimsuit".split(";")
tag_change = ["Test1", "Test2", "Blabla"]
# before is last arg so staticmethod can set attr on book if its loaded (in id_map)
Book.add_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";") == tag_before
# load book so its in id_map and make sure add_remove_assoc also sets attr on book
b = mdb.get_book(16)
tag_before = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;"
"Layer Cake;Selfcest".split(";"))
tag_change = ["Test3", "Test4", "Blablabla"]
# before is last arg so staticmethod can set attr on book if its loaded (in id_map)
Book.add_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
# also set attr on book
assert b.tag[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";") == tag_before
# also set attr on book
assert b.tag == tag_before
Book.set_favorite_id(mdb, 2, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 2").fetchone()
assert 1 == fav[0]
b = mdb.get_book(7)
Book.set_favorite_id(mdb, 7, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 7").fetchone()
assert 1 == fav[0]
# also set on book
assert b.favorite == 1
Book.rate_book_id(mdb, 3, 3.5)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 3").fetchone()
assert 3.5 == rat[0]
b = mdb.get_book(8)
Book.rate_book_id(mdb, 8, 4.25)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 8").fetchone()
assert 4.25 == rat[0]
# also set on book
assert b.my_rating == 4.25
def test_remove_book(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
import shutil
# copy cover
os.makedirs(os.path.join(tmpdir, "thumbs"))
cover_path = os.path.join(tmpdir, "thumbs", "16")
shutil.copyfile(os.path.join(tmpdir, os.pardir, "book_test_files", "16"), cover_path)
db_con = memdb
# book removed and all ext infos
b = mdb.get_book(16)
b.remove()
assert b._in_db is False
# deleted from id map
with pytest.raises(KeyError):
mdb.id_map[b.key]
b_row = db_con.execute("SELECT id FROM Books WHERE id = 16").fetchall()
assert not b_row
ei_rows = db_con.execute("SELECT id FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
assert not ei_rows
# cover deleted
assert not os.path.exists(cover_path)
def test_remove_extinfo(monkeypatch, setup_mdb_dir, caplog):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = mdb.get_book(16)
caplog.clear()
assert b.remove_ext_info(99) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.ERROR, "No external info with id 99 found!")
]
assert b.remove_ext_info(18) == "https://www.tsumino.com/entry/43454"
assert len(b.ext_infos) == 1
assert b.ext_infos[0].id == 16
assert b.remove_ext_info(16)
assert not b.ext_infos
caplog.clear()
assert b.remove_ext_info(4939) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.WARNING, "No external infos on book with id 16 or not"
" fetched from DB yet!")
]
def test_save_book(monkeypatch, setup_mdb_dir, caplog):
# save: _add _update
# incl! _update_assoc_cols -> "
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
# _add
ei_data = dict(
id=None,
book_id=None,
url="http://test1.com",
id_onpage='1111',
imported_from=1,
upload_date=datetime.date(2018, 4, 13),
uploader="Uploader",
censor_id=1,
rating=4.19,
ratings=165,
favorites=300,
downloaded=None,
last_update=None,
outdated=None,
)
b1_data = dict(
id=None,
title_eng="Add1",
title_foreign="Foreign1",
language_id=1,
pages=25,
chapter_status="Vol. 2 Ch. 14",
read_status=13,
status_id=1,
my_rating=None,
category=["Manga"],
collection=None,
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=None,
cover_timestamp=None,
nsfw=1
)
b1 = Book(mdb, **b1_data)
# since we later check that cover_timestamp gets saved as 0.0 if None
b1_data['cover_timestamp'] = 0.0
ei1 = ExternalInfo(mdb, b1, **ei_data)
ei2 = ExternalInfo(mdb, b1, **ei_data)
# will outdate extinfo 8
ei2.id_onpage = '43506'
b1.ext_infos = [ei1, ei2]
assert b1._in_db is False
bid, outdated = b1.save()
assert bid == 18
assert b1.id == 18
# in_db + id_map, committed reset
assert b1._in_db is True
assert mdb.id_map[b1.key] is b1
assert not b1._committed_state
book_info_db = all_book_info(db_con, 18, include_id=True)
assert len(book_info_db) == 2
# fav set correctly
assert book_info_db[0]["favorite"] == 0
assert b1.favorite == 0
compare_cols_row_book_data(b1, book_info_db[0], b1_data, special={"favorite": 0})
# outdated, list of ext info ids that outdated others
assert outdated == [20]
# extinfo saved
eis = db_con.execute("SELECT id, book_id, id_onpage FROM ExternalInfo "
"WHERE id > 18").fetchall()
assert len(eis) == 2
assert eis[0]["book_id"] == 18
assert eis[1]["book_id"] == 18
assert eis[0]["id_onpage"] == '1111'
assert eis[1]["id_onpage"] == '43506'
# add book with new lang
b2 = Book(mdb, title_eng="Test2", favorite=1, pages=11, status_id=1, nsfw=0)
b2.language = "Krababbl"
bid, _ = b2.save()
assert bid == 19
assert b2.id == 19
# /2 since we have double indirection id->name name->id
expected_lang_id = len(LANG_IDS) / 2 + 1
assert b2.language_id == expected_lang_id
lang = db_con.execute("SELECT id FROM Languages WHERE name = 'Krababbl'").fetchall()
assert lang
assert lang[0][0] == expected_lang_id
brow = db_con.execute("SELECT title_eng, favorite FROM Books WHERE id = 19").fetchone()
assert brow[0] == "Test2"
assert brow["favorite"] == 1
assert b2.favorite == 1
assert b2._in_db is True
assert not b2._committed_state
assert mdb.id_map[b2.key] is b2
# _update
bu1 = Book(mdb, id=None, title_eng="Kangofu-san ni Kintama Sakusei Saremashita",
title_foreign="看護婦さんにキンタマ搾精されました", in_db=False)
bu1.in_db = True
# test not updating when block_update kwarg is true
caplog.clear()
assert bu1.save(block_update=True) == (None, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG,
f"Book was found in DB(id 15) but saving was blocked due to "
"block_update option!")
]
bu2 = mdb.get_book(11)
# dont do anything if no changes
caplog.clear()
assert not bu2._committed_state
assert bu2.save() == (11, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG, "No changes to save for book with id 11")
]
assert not bu2._committed_state
before = bu2.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu2, col) if getattr(bu2, col) else None
for col in bu2.ASSOCIATED_COLUMNS})
bu2.language = "adlalad"
change = {
"title_eng": "Altered",
"language_id": 3,
"my_rating": 4.75,
"favorite": 1,
# removed and added
"tag": ("Large Breasts;Test33;Nakadashi;Ahegao;Gender Bender;Dark Skin;Elf;Body Swap"
";Bondage;Filming;Test Tag".split(";")),
# added
"artist": ["Taniguchi-san", "Newartist"],
# same
"category": ["Manga"],
# none added
"character": ["Char111", "Char222"]
}
bu2.update_from_dict(change)
before.update(change)
bid, _ = bu2.save()
book_info_db = all_book_info(db_con, 11, include_id=True)
compare_cols_row_book_data(bu2, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu2._committed_state
# last_change
assert bu2.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
bu3 = mdb.get_book(7)
assert not bu3._committed_state
before = bu3.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu3, col) if getattr(bu3, col) else None
for col in bu3.ASSOCIATED_COLUMNS})
change = {
"title_foreign": "ForeignAltered",
"pages": 13,
"note": "Note blabla",
# set None
"tag": None,
# set None
"artist": None,
# changed
"category": ["Manga"],
# none added
"collection": ["Col1", "Col2"],
"groups": ["Grp1", "Grp2", "Senpenbankashiki"]
}
bu3.update_from_dict(change)
before.update(change)
bid, _ = bu3.save()
book_info_db = all_book_info(db_con, 7, include_id=True)
compare_cols_row_book_data(bu3, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu3._committed_state
# last_change
assert bu3.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
assoc_concat = {
"tag": "tags", "artist": "artists", "category": "categories", "character": "characters",
"collection": "collections", "groups": "groups", "list": "lists", "parody": "parodies"
}
def compare_cols_row_book_data(book, row, data, special=None):
if special is None:
special = {}
for col in Book.COLUMNS:
row_val = row[col]
data_val = data[col]
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert getattr(book, col) == special[col]
elif data_val is None:
# use is comparison for None
assert row_val is None
assert getattr(book, col) is None
else:
assert row_val == data_val
assert getattr(book, col) == data_val
for col in Book.ASSOCIATED_COLUMNS:
if col == "ext_infos":
continue
# look up plural of col to get name of concat assoc col
col_assoc_concat = assoc_concat[col]
row_val = row[col_assoc_concat]
if row_val is not None:
# row_val is concatted values
# need sorted to compare (or use set)
row_val = sorted(row_val.split(";")) if ";" in row_val else [row_val]
# need sorted to compare (or use set)
data_val = sorted(data[col]) if data[col] else None
book_val = getattr(book, col)
book_val = sorted(book_val) if book_val else book_val
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert book_val == special[col]
elif data_val is None:
# assoc col doesnt return None only empty trackable
assert row_val is None
assert book_val == []
else:
assert row_val == data_val
assert book_val == data_val
| 36.960938 | 97 | 0.61023 | [
"MIT"
] | nilfoer/mangadb | tests/test_book.py | 23,697 | Python |
from datetime import timedelta
import json
from os import listdir
from os.path import isfile, join
import pr0gramm
import logging
__author__ = "Peter Wolf"
__mail__ = "[email protected]"
__date__ = "2016-12-26"
LOG = logging.getLogger(__name__)
class DataSources:
IMAGE, THUMBNAIL, FULL_SIZE = range(3)
class DataCollector:
""" The DataCollector retrieves relevant data from
pr0gramm and saves it locally.
"""
def __init__(self, api, last_id=None):
self.api = api
self.last_id = last_id
self.age_threshold = timedelta(hours=5)
self.min_num_of_tags = 5
self.search_forwards = True
self.media_directory = "/tmp"
self.data_source = DataSources.IMAGE
self.annotation_file = "/tmp/annotation.txt"
self.json_dir = "/tmp"
self.download_media = True
self.save_json = False
self.use_local_storage = False
self.last_batch_size = None
def setAgeThreshold(self, days=0, hours=5, minutes=0, seconds=0):
self.age_threshold = timedelta(
days=days, hours=hours, minutes=minutes, seconds=seconds)
def setMinimumNumberOfTags(self, threshold):
self.min_num_of_tags = threshold
def setLastId(self, last_id):
self.last_id = last_id
def getLastId(self):
return self.last_id
def useBackwardsSearch(self):
self.search_forwards = False
def useForwardsSearch(self):
self.search_forwards = True
def setMediaDirectory(self, directory):
self.media_directory = directory
def setDataSource(self, source):
self.data_source = source
def setAnnotationFile(self, annotation_file):
self.annotation_file = annotation_file
def setJsonDir(self, directory):
self.json_dir = directory
def setDownloadMedia(self, download_media):
self.download_media = download_media
def setSaveJSON(self, save_json):
self.save_json = save_json
def setUseLocalStorage(self, use_local_storage):
self.use_local_storage = use_local_storage
def getSizeOfLastBatch(self):
return self.last_batch_size
def download(self, item):
if self.data_source == DataSources.IMAGE:
return self.api.downloadMedia(
item, save_dir=self.media_directory, file_name=item.id)
elif self.data_source == DataSources.THUMBNAIL:
return self.api.downloadThumbnail(
item, save_dir=self.media_directory, file_name=item.id)
elif self.data_source == DataSources.FULL_SIZE:
return self.api.downloadFullsize(
item, save_dir=self.media_directory, file_name=item.id)
else:
print "No valid data source chosen:", str(self.data_source)
return None
def writeAnnotation(self, item, media_path):
# Read the current annotation file
content = []
if isfile(self.annotation_file):
with open(self.annotation_file, "r") as f:
content = f.readlines()
# write every item as a line with the following structure:
# ID;IMAGE_PATH;AMOUNT_OF_TAGS;...TAG_TEXT;TAG_CONFIDENCE;...
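        # e.g. (hypothetical values): 12345;/tmp/12345.jpg;2;first tag;0.93;second tag;0.57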
new_line = str(item.id) + ";"
new_line += str(media_path) + ";"
new_line += str(len(item.tags)) + ";"
new_line += ";".join([str(tag.getText()) + ";" +
str(tag.getConfidence()) for tag in item.tags])
# Check if the item already has an entry in the annotation file
# and replace it.
contained = False
for i in range(len(content)):
if content[i].strip().startswith(str(item.id)):
content[i] = new_line
contained = True
break
# If no entry already exists, add a new line for the item
if not contained:
content.append(new_line)
# Write the new content to the file.
with open(self.annotation_file, "w") as f:
for line in content:
f.write(line.strip() + "\n")
def getItemsFromAPI(self):
if self.search_forwards:
return self.api.getItemsNewer(self.last_id)
else:
return self.api.getItemsOlder(self.last_id)
def getItemsFromLocalStorage(self):
json_files = [join(self.json_dir, f) for f in listdir(self.json_dir)
if isfile(join(self.json_dir, f)) and f.endswith(".json")]
data = []
for json_file in json_files:
with open(json_file, "r") as f:
json_item = json.load(f)
item = pr0gramm.Item.Item.parseFromJSON(json_item)
if not self.last_id \
or (self.search_forwards and item.getSortId() > self.last_id) \
or (not self.search_forwards and item.getSortId() < self.last_id):
data.append(item)
data.sort(reverse=True)
return data
def collectDataBatch(self, data=[]):
# retrieve data if none has been given
if not data:
if self.use_local_storage:
data = self.getItemsFromLocalStorage()
else:
data = self.getItemsFromAPI()
if not data:
return
# filter data based on age and tags
valid_data = []
for item in data:
if item.getAge() >= self.age_threshold and len(item.tags) > 0:
valid_data.append(item)
# save size of collected data batch
self.last_batch_size = len(valid_data)
if not valid_data:
return
# save id of last item to fit age criteria in search direction
if self.search_forwards:
self.last_id = valid_data[0].getSortId()
else:
self.last_id = valid_data[-1].getSortId()
for item in valid_data:
            if self.download_media:
# download media
target_path = self.download(item)
if target_path:
# write id(s), link to media and tags to file
self.writeAnnotation(item, target_path)
if self.save_json:
with open(self.json_dir + "/" + str(item.id) + ".json", "w") as f:
json.dump(item.asDict(), f)
return self.last_id
| 33.208333 | 90 | 0.601317 | [
"MIT"
] | BigPeet/pr0tagger | src/data_collection/data_collector.py | 6,376 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.qtumconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
from test_framework.qtum import convert_btc_address_to_qtum
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import CBlockHeader
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
class ReqType(Enum):
JSON = 1
BIN = 2
HEX = 3
class RetType(Enum):
OBJ = 1
BYTES = 2
JSON = 3
def filter_output_indices_by_value(vouts, value):
for vout in vouts:
if vout['value'] == value:
yield vout['n']
class RESTTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-rest"], []]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
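        # e.g. uri='/tx/<txid>' with ReqType.JSON yields rest_uri='/rest/tx/<txid>.json'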
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Bitcoin to node 1")
# Random address so node1's balance doesn't increase
not_related_address = convert_btc_address_to_qtum("2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ")
self.nodes[0].generate(1)
self.sync_all()
for i in range(0, COINBASE_MATURITY, 100):
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), INITIAL_BLOCK_REWARD)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("<i", output.read(4))
response_hash = output.read(32)[::-1].hex()
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
        assert_equal(chain_height, COINBASE_MATURITY+2)  # chain height must be COINBASE_MATURITY + 2
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), 181)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:181], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest().main()
| 44.650746 | 153 | 0.663993 | [
"MIT"
] | 100milliondollars/NeuQ | test/functional/interface_rest.py | 14,958 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
MAPPING = {
"dynamic": False,
"properties": {
"classification_type": {"type": "keyword"},
"date": {"type": "date", "format": "strict_date_optional_time||epoch_millis"},
"global_metrics": {
"dynamic": False,
"properties": {
"field": {
"dynamic": False,
"properties": {
"id": {"type": "integer"},
"name": {
"type": "text",
"fields": {
# subfield
"raw": {"type": "keyword"}
},
},
"type": {"type": "keyword"},
},
},
"dataset": {
"dynamic": False,
"properties": {
"nb_classes": {"type": "integer"},
"support_train": {"type": "integer"},
},
},
"performance": {
"dynamic": False,
"properties": {
"test": {
"dynamic": False,
"properties": {
"macro": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
},
"micro": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
},
},
}
},
},
},
},
"id": {"type": "keyword"},
"language": {"type": "keyword"},
"local_metrics": {
"type": "nested",
"dynamic": False,
"properties": {
"dataset": {
"dynamic": False,
"properties": {
"support_test": {"type": "integer"},
"support_train": {"type": "integer"},
},
},
"field_class": {
"dynamic": False,
"properties": {
"id": {"type": "integer"},
"name": {"type": "keyword"},
},
},
"performance": {
"dynamic": False,
"properties": {
"test": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
}
},
},
},
},
"workflow": {"type": "keyword"},
},
}
EXPECTED_MAPPING_REPR = """_
├── classification_type Keyword
├── date Date
├── global_metrics {Object}
│ ├── dataset {Object}
│ │ ├── nb_classes Integer
│ │ └── support_train Integer
│ ├── field {Object}
│ │ ├── id Integer
│ │ ├── name Text
│ │ │ └── raw ~ Keyword
│ │ └── type Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── macro {Object}
│ │ ├── f1_score Float
│ │ ├── precision Float
│ │ └── recall Float
│ └── micro {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
├── id Keyword
├── language Keyword
├── local_metrics [Nested]
│ ├── dataset {Object}
│ │ ├── support_test Integer
│ │ └── support_train Integer
│ ├── field_class {Object}
│ │ ├── id Integer
│ │ └── name Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
└── workflow Keyword
"""
EXPECTED_MAPPING_TREE_REPR = """<Mapping>\n%s""" % EXPECTED_MAPPING_REPR
EXPECTED_CLIENT_BOUND_MAPPING_REPR = """<IMapping>\n%s""" % EXPECTED_MAPPING_REPR
| 45.368794 | 90 | 0.239487 | [
"MIT"
] | leonardbinet/pandagg | tests/testing_samples/mapping_example.py | 6,699 | Python |
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from datetime import datetime
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from Logger.app_logger import App_logger
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
class Training:
def __init__(self,train_path,test_path,val_path):
self.train_path = train_path
self.test_path = test_path
self.val_path = val_path
self.file_object = open("Training_Logs/ModelTrainingLog.txt", 'a+')
self.log_object = App_logger()
def train(self):
self.log_object.log(self.file_object,"Entered in to train method in Training class.Training started")
try:
x_train = []
for folder in os.listdir(self.train_path):
sub_path = self.train_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_train.append(img_arr)
x_test = []
for folder in os.listdir(self.test_path):
sub_path = self.test_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_test.append(img_arr)
x_val = []
for folder in os.listdir(self.val_path):
sub_path = self.val_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_val.append(img_arr)
self.log_object.log(self.file_object, "Entered in to train method in Training class.train,test,val split successfull")
train_x = np.array(x_train) / 255.0
test_x = np.array(x_test) / 255.0
val_x = np.array(x_val) / 255.0
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory(self.train_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
test_set = test_datagen.flow_from_directory(self.test_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
val_set = val_datagen.flow_from_directory(self.val_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
train_y = training_set.classes
test_y = test_set.classes
val_y = val_set.classes
IMAGE_SIZE = [224, 224]
vgg = VGG19(input_shape= IMAGE_SIZE + [3],weights='imagenet',include_top=False)
self.log_object.log(self.file_object, "Entered in to train method in Training class. Model successfully initialized")
for layer in vgg.layers:
layer.trainable = False
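            # The pre-trained VGG19 convolutional base is frozen above, so
            # only the new classification head defined below is trained.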
            x = Flatten()(vgg.output)
            prediction = Dense(5, activation='softmax')(x)
            model = Model(inputs=vgg.input, outputs=prediction)
model.summary()
model.compile(loss = 'sparse_categorical_crossentropy',
optimizer='adam',metrics=['accuracy'])
self.log_object.log(self.file_object, "Entered in to train method in Training class.Model compile successfull")
            file_path = 'vgg19_model/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'
            os.makedirs('vgg19_model', exist_ok=True)
self.log_object.log(self.file_object,"check point directory created")
check_point = ModelCheckpoint(file_path,monitor='val_accuracy', verbose=1,save_best_only=True, mode='max')
start = datetime.now()
self.log_object.log(self.file_object, f"Entered in to train method in Training class.Training start time {start}")
history = model.fit(train_x,train_y,
validation_data= (val_x,val_y),
epochs=20,
callbacks = [check_point],
batch_size=64, shuffle=True)
duration = datetime.now() - start
self.log_object.log(self.file_object, f"Entered in to train method in Training class.Total time taken is {duration}")
model.save('mech_tools_model.h5')
self.log_object.log(self.file_object, f"Entered in to train method in Training class.model saved successfully")
# accuracies
plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('vgg-acc-rps-1.png')
            # loss (use a fresh figure so the accuracy curves are not saved again)
            plt.figure()
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('vgg-loss-rps-1.png')
self.log_object.log(self.file_object, "Entered in to train method in Training class.model evaluation started")
model.evaluate(test_x, test_y, batch_size=32)
# predict
y_pred = model.predict(test_x)
y_pred = np.argmax(y_pred, axis=1)
self.log_object.log(self.file_object, f"Entered in to train method in Training class.classification report {classification_report(y_pred, test_y)}")
self.log_object.log(self.file_object, f"Entered in to train method in Training class.confusion matrix is{confusion_matrix(y_pred, test_y)}")
except Exception as e:
# logging the unsuccessful Training
self.log_object.log(self.file_object, 'Unsuccessful End of Training')
self.log_object.log(self.file_object,f"exception occured.exception is {e}")
raise Exception
self.file_object.close()
if __name__ == "__main__":
train_path = "final_dataset/train"
test_path = "final_dataset/test"
val_path = "final_dataset/val"
train_model = Training(train_path, test_path, val_path)
train_model.train() | 42.945652 | 160 | 0.552645 | [
"MIT"
] | aasir22/tools_classification | training.py | 7,902 | Python |
#
# example from CHiLL manual page 14
#
# permute 3 loops
#
from chill import *
source('permute123456.c')
destination('permute1modified.c')
procedure('mm')
loop(0)
known('ambn > 0')
known('an > 0')
known('bm > 0')
permute([3,1,2])
| 9.88 | 36 | 0.631579 | [
"MIT"
] | CompOpt4Apps/Artifact-DataDepSimplify | chill/examples/chill/testcases/permute1.script.py | 247 | Python |
import re
from scripts.features.feature_extractor import FeatureExtractor
from bs4 import BeautifulSoup
class ItemizationCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
count = len(soup.find_all("ul"))
return count
class ImageCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
count = len(soup.find_all("img"))
return count
class FormulaCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
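        # count inline TeX formulas delimited by $...$ in the rendered body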
count = len(re.findall(r'\$.*?\$+', post.rendered_body))
return count
class ItemizationRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
target_count = len(soup.find_all("ul"))
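        # sentences are approximated by splitting the post text on the Japanese full stop "。"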
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
class ImageRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
target_count = len(soup.find_all("img"))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
class FormulaRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
target_count = len(re.findall(r'\$.*?\$+', post.rendered_body))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
| 27.242857 | 71 | 0.668589 | [
"Apache-2.0"
] | chakki-works/elephant-sense | scripts/features/structure_extractor.py | 1,913 | Python |
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import math
import numpy
import pixiedust
myLogger = pixiedust.getLogger(__name__)
def append(displayObject, arr, option):
if option is not None and displayObject.acceptOption(option["name"]):
arr.append(option)
def chartSize():
return {
'name': 'chartsize',
'description': 'Chart Size',
'metadata': {
'type': 'slider',
'max': 100,
'min': 50,
'default': 100
}
}
def clusterBy(displayObject):
return {
'name': 'clusterby',
'description': 'Cluster By',
'refresh': True,
'metadata': {
'type': "dropdown",
'values': ["None"] + sorted([f for f in displayObject.getFieldNames() if f not in displayObject.getKeyFields() and f not in displayObject.getValueFields()]),
'default': ""
},
'validate': lambda option:\
(option in displayObject.getFieldNames() and option not in displayObject.getKeyFields() and option not in displayObject.getValueFields(),\
"Cluster By value is already used in keys or values for this chart")
}
def timeSeries(displayObject):
if len(displayObject.getKeyFields()) == 1:
pdf = displayObject.getWorkingPandasDataFrame()
field = displayObject.getKeyFields()[0]
dtype = pdf[field].dtype.type if field in pdf else None
existingValue = displayObject.options.get("timeseries", 'false')
if dtype is not None and (dtype is not numpy.datetime64 or existingValue == 'true'):
return {
'name': 'timeseries',
'description': 'Time Series',
'metadata':{
'type': 'checkbox',
'default': 'false'
}
}
def barChart(displayObject):
options = []
options.append(chartSize())
options.append(clusterBy(displayObject))
append(displayObject, options, timeSeries(displayObject))
if not hasattr(displayObject, 'no_orientation') or displayObject.no_orientation is not True:
options.append({
'name': 'orientation',
'description': 'Orientation',
'metadata': {
'type': 'dropdown',
'values': ['vertical', 'horizontal'],
'default': "vertical"
}
})
if displayObject.options.get("clusterby") != None or len(displayObject.getValueFields()) > 1:
options.append({
'name': 'charttype',
'description': 'Type',
'metadata': {
'type': 'dropdown',
'values': ['grouped', 'stacked', 'subplots'],
'default': "grouped"
}
})
options.append({
'name': 'legend',
'description': 'Show legend',
'metadata': {
'type': 'checkbox',
'default': "true"
}
})
options.append({
'name': 'sortby',
'description': 'Sort By',
'metadata': {
'type': 'dropdown',
'values': ['Keys ASC', 'Keys DESC', 'Values ASC', 'Values DESC'],
'default': 'Keys ASC'
}
})
return options
def lineChart(displayObject):
options = []
options.append(chartSize())
options.append(clusterBy(displayObject))
append(displayObject, options, timeSeries(displayObject))
if displayObject.options.get("clusterby") != None or len(displayObject.getValueFields()) > 1:
options.append({
'name': 'lineChartType',
'description': 'Type',
'metadata': {
'type': 'dropdown',
'values': ['grouped', 'subplots'],
'default': "grouped"
}
})
options.append({
'name': 'legend',
'description': 'Show legend',
'metadata': {
'type': 'checkbox',
'default': "false"
}
})
options.append({
'name': 'logx',
'description': 'log scale on x',
'metadata': {
'type': 'checkbox',
'default': "false"
}
})
options.append({
'name': 'logy',
'description': 'log scale on y',
'metadata': {
'type': 'checkbox',
'default': "false"
}
})
return options
def histogram(displayObject):
options = []
options.append(chartSize())
if len(displayObject.getValueFields()) > 1:
append(displayObject, options, {
'name': 'histoChartType',
'description': 'Type',
'metadata': {
'type': 'dropdown',
'values': ['stacked', 'subplots'],
'default': "stacked"
}
})
count = len(displayObject.getWorkingPandasDataFrame().index)
default = math.sqrt(count)
vals = len(displayObject.getWorkingPandasDataFrame().groupby(displayObject.getValueFields()[0]).size())
options.append({
'name': 'binsize',
'description': 'Bin Count',
'metadata': {
'type': 'slider',
'max': int(max(vals, default) + 10),
'min': int(max((min(vals, default) - 10), 2)),
'default': int(default)
}
})
return options
def pieChart(displayObject):
options = []
options.append(chartSize())
return options
def scatterPlot(displayObject):
options = []
options.append(chartSize())
return options
commonOptions = {}
for f in [barChart,lineChart,histogram,pieChart,scatterPlot]:
commonOptions.update({f.__name__:f})
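# commonOptions now maps each renderer name ('barChart', 'lineChart', 'histogram',
# 'pieChart', 'scatterPlot') to the function that builds its option metadata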
| 31.26601 | 169 | 0.540728 | [
"Apache-2.0"
] | elgalu/pixiedust | pixiedust/display/chart/renderers/commonOptions.py | 6,347 | Python |
import asyncio
import io
from PIL import Image
from PIL import ImageDraw
from discord import Colour
import datetime
import urllib
import urllib.request
import aiohttp
import re
from datetime import datetime, date, timedelta
from calendar import timegm
import time
from utils.database import userDatabase, tibiaDatabase
from config import highscores_categories, network_retry_delay
from utils.messages import EMOJI
from .general import log, global_online_list, get_local_timezone
# Constants
ERROR_NETWORK = 0
ERROR_DOESNTEXIST = 1
ERROR_NOTINDATABASE = 2
# Tibia.com URLs:
url_character = "https://secure.tibia.com/community/?subtopic=characters&name="
url_guild = "https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName="
url_guild_online = "https://secure.tibia.com/community/?subtopic=guilds&page=view&onlyshowonline=1&"
url_house = "https://secure.tibia.com/community/?subtopic=houses&page=view&houseid={id}&world={world}"
url_highscores = "https://secure.tibia.com/community/?subtopic=highscores&world={0}&list={1}&profession={2}¤tpage={3}"
KNIGHT = ["knight", "elite knight", "ek", "k", "kina", "eliteknight","elite"]
PALADIN = ["paladin", "royal paladin", "rp", "p", "pally", "royalpaladin", "royalpally"]
DRUID = ["druid", "elder druid", "ed", "d", "elderdruid", "elder"]
SORCERER = ["sorcerer", "master sorcerer", "ms", "s", "sorc", "mastersorcerer", "master"]
MAGE = DRUID + SORCERER + ["mage"]
NO_VOCATION = ["no vocation", "no voc", "novoc", "nv", "n v", "none", "no", "n", "noob", "noobie", "rook", "rookie"]
highscore_format = {"achievements": "{0} __achievement points__ are **{1}**, on rank **{2}**",
"axe": "{0} __axe fighting__ level is **{1}**, on rank **{2}**",
"club": "{0} __club fighting__ level is **{1}**, on rank **{2}**",
"distance": "{0} __distance fighting__ level is **{1}**, on rank **{2}**",
"fishing": "{0} __fishing__ level is **{1}**, on rank **{2}**",
"fist": "{0} __fist fighting__ level is **{1}**, on rank **{2}**",
"loyalty": "{0} __loyalty points__ are **{1}**, on rank **{2}**",
"magic": "{0} __magic level__ is **{1}**, on rank **{2}**",
"magic_ek": "{0} __magic level__ is **{1}**, on rank **{2}** (knights)",
"magic_rp": "{0} __magic level__ is **{1}**, on rank **{2}** (paladins)",
"shielding": "{0} __shielding__ level is **{1}**, on rank **{2}**",
"sword": "{0} __sword fighting__ level is **{1}**, on rank **{2}**"}
tibia_worlds = ["Amera", "Antica", "Astera", "Aurera", "Aurora", "Bellona", "Belobra", "Beneva", "Calmera", "Calva",
"Calvera", "Candia", "Celesta", "Chrona", "Danera", "Dolera", "Efidia", "Eldera", "Ferobra", "Fidera",
"Fortera", "Garnera", "Guardia", "Harmonia", "Honera", "Hydera", "Inferna", "Iona", "Irmada", "Julera",
"Justera", "Kenora", "Kronera", "Laudera", "Luminera", "Magera", "Menera", "Morta", "Mortera",
"Neptera", "Nerana", "Nika", "Olympa", "Osera", "Pacera", "Premia", "Pythera", "Guilia", "Refugia",
"Rowana", "Secura", "Serdebra", "Shivera", "Silvera", "Solera", "Tavara", "Thera", "Umera", "Unitera",
"Veludera", "Verlana", "Xantera", "Xylana", "Yanara", "Zanera", "Zeluna", "Honbra", "Noctera", "Vita",
"Duna", "Relembra", "Helera", "Tortura", "Macabra"]
def get_character_url(name):
"""Gets a character's tibia.com URL"""
return url_character + urllib.parse.quote(name.encode('iso-8859-1'))
@asyncio.coroutine
def get_highscores(server,category,pagenum, profession=0, tries=5):
"""Gets a specific page of the highscores
Each list element is a dictionary with the following keys: rank, name, value.
May return ERROR_NETWORK"""
url = url_highscores.format(server, category, profession, pagenum)
# Fetch website
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
pagenum))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_highscores(server, category, pagenum, profession, tries)
return ret
# Trimming content to reduce load
try:
start_index = content.index('<td style="width: 20%;" >Vocation</td>')
end_index = content.index('<div style="float: left;"><b>» Pages:')
content = content[start_index:end_index]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("get_highscores: Couldn't fetch {0}, {1}, page {2}, network error.".format(server, category,
pagenum))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_highscores(server, category, pagenum, profession, tries)
return ret
if category == "loyalty":
regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
scoreList = []
for m in matches:
scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
else:
regex_deaths = r'<td>([^<]+)</TD><td><a href="https://secure.tibia.com/community/\?subtopic=characters&name=[^"]+" >([^<]+)</a></td><td>[^<]+</TD><td style="text-align: right;" >([^<]+)</TD></TR>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
scoreList = []
for m in matches:
scoreList.append({'rank': m[0], 'name': m[1], 'value': m[2].replace(',', '')})
return scoreList
@asyncio.coroutine
def get_server_online(server, tries=5):
"""Returns a list of all the online players in current server.
Each list element is a dictionary with the following keys: name, level"""
server = server.capitalize()
url = 'https://secure.tibia.com/community/?subtopic=worlds&world=' + server
onlineList = []
# Fetch website
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
# This should return ERROR_NETWORK, but requires error handling where this function is used
return onlineList
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_server_online(server, tries)
return ret
while not content and tries > 0:
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
tries -= 1
# Trimming content to reduce load
try:
start_index = content.index('<div class="BoxContent"')
end_index = content.index('<div id="ThemeboxesColumn" >')
content = content[start_index:end_index]
except ValueError:
# Website fetch was incomplete due to a network error
if tries == 0:
log.error("getServerOnline: Couldn't fetch {0}, network error.".format(server))
# This should return ERROR_NETWORK, but requires error handling where this function is used
return onlineList
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_server_online(server, tries)
return ret
regex_members = r'<a href="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)" >.+?</a></td><td style="width:10%;" >(.+?)</td>'
pattern = re.compile(regex_members, re.MULTILINE + re.S)
m = re.findall(pattern, content)
# Check if list is empty
if m:
# Building dictionary list from online players
for (name, level) in m:
name = urllib.parse.unquote_plus(name)
onlineList.append({'name': name, 'level': int(level)})
return onlineList
@asyncio.coroutine
def get_guild_online(guildname, titlecase=True, tries=5):
"""Returns a guild's world and online member list in a dictionary.
The dictionary contains the following keys: name, logo_url, world and members.
The key members contains a list where each element is a dictionary with the following keys:
rank, name, title, vocation, level, joined.
Guilds are case sensitive on tibia.com so guildstats.eu is checked for correct case.
May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly."""
gstats_url = 'http://guildstats.eu/guild?guild=' + urllib.parse.quote(guildname)
guild = {}
# Fix casing using guildstats.eu if needed
# Sorry guildstats.eu :D
if not titlecase:
# Fetch website
try:
page = yield from aiohttp.get(gstats_url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Make sure we got a healthy fetch
try:
content.index('<div class="footer">')
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0} from guildstats.eu, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Check if the guild doesn't exist
if "<div>Sorry!" in content:
return ERROR_DOESNTEXIST
# Failsafe in case guildstats.eu changes their websites format
try:
content.index("General info")
content.index("Recruitment")
except Exception:
log.error("getGuildOnline: -IMPORTANT- guildstats.eu seems to have changed their websites format.")
return ERROR_NETWORK
startIndex = content.index("General info")
endIndex = content.index("Recruitment")
content = content[startIndex:endIndex]
m = re.search(r'<a href="set=(.+?)"', content)
if m:
guildname = urllib.parse.unquote_plus(m.group(1))
else:
guildname = guildname.title()
tibia_url = 'https://secure.tibia.com/community/?subtopic=guilds&page=view&GuildName=' + urllib.parse.quote(
guildname) + '&onlyshowonline=1'
# Fetch website
try:
page = yield from aiohttp.get(tibia_url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Trimming content to reduce load and making sure we got a healthy fetch
try:
startIndex = content.index('<div class="BoxContent"')
endIndex = content.index('<div id="ThemeboxesColumn" >')
content = content[startIndex:endIndex]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getGuildOnline: Couldn't fetch {0}, network error.".format(guildname))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_guild_online(guildname, titlecase, tries)
return ret
# Check if the guild doesn't exist
# Tibia.com has no search function, so there's no guild doesn't exist page cause you're not supposed to get to a
# guild that doesn't exists. So the message displayed is "An internal error has ocurred. Please try again later!".
if '<div class="Text" >Error</div>' in content:
if titlecase:
ret = yield from get_guild_online(guildname, False)
return ret
else:
return ERROR_DOESNTEXIST
# Regex pattern to fetch world, guildhall and founding date
m = re.search(r'founded on (\w+) on ([^.]+)', content)
if m:
guild['world'] = m.group(1)
m = re.search(r'Their home on \w+ is ([^\.]+)', content)
if m:
guild["guildhall"] = m.group(1)
# Logo URL
m = re.search(r'<IMG SRC=\"([^\"]+)\" W', content)
if m:
guild['logo_url'] = m.group(1)
# Regex pattern to fetch members
regex_members = r'<TR BGCOLOR=#[\dABCDEF]+><TD>(.+?)</TD>\s</td><TD><A HREF="https://secure.tibia.com/community/\?subtopic=characters&name=(.+?)">.+?</A> *\(*(.*?)\)*</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>\s<TD>(.+?)</TD>'
pattern = re.compile(regex_members, re.MULTILINE + re.S)
m = re.findall(pattern, content)
guild['members'] = []
# Check if list is empty
if m:
# Building dictionary list from members
for (rank, name, title, vocation, level, joined) in m:
rank = '' if (rank == ' ') else rank
name = urllib.parse.unquote_plus(name)
joined = joined.replace(' ', '-')
guild['members'].append({'rank': rank, 'name': name, 'title': title,
'vocation': vocation, 'level': level, 'joined': joined})
guild['name'] = guildname
return guild
@asyncio.coroutine
def get_character(name, tries=5):
"""Returns a dictionary with a player's info
The dictionary contains the following keys: name, deleted, level, vocation, world, residence,
married, gender, guild, last,login, chars*.
*chars is list that contains other characters in the same account (if not hidden).
Each list element is dictionary with the keys: name, world.
May return ERROR_DOESNTEXIST or ERROR_NETWORK accordingly."""
try:
url = url_character + urllib.parse.quote(name.encode('iso-8859-1'))
except UnicodeEncodeError:
return ERROR_DOESNTEXIST
char = dict()
# Fetch website
try:
page = yield from aiohttp.get(url)
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_character(name, tries)
return ret
# Trimming content to reduce load
try:
startIndex = content.index('<div class="BoxContent"')
endIndex = content.index("<B>Search Character</B>")
content = content[startIndex:endIndex]
except ValueError:
# Website fetch was incomplete, due to a network error
if tries == 0:
log.error("getPlayer: Couldn't fetch {0}, network error.".format(name))
return ERROR_NETWORK
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
ret = yield from get_character(name, tries)
return ret
# Check if player exists
if "Name:</td><td>" not in content:
return ERROR_DOESNTEXIST
# TODO: Is there a way to reduce this part?
# Name
m = re.search(r'Name:</td><td>([^<,]+)', content)
if m:
char['name'] = m.group(1).strip()
# Deleted
m = re.search(r', will be deleted at ([^<]+)', content)
if m:
char['deleted'] = True
# Vocation
m = re.search(r'Vocation:</td><td>([^<]+)', content)
if m:
char['vocation'] = m.group(1)
# Level
m = re.search(r'Level:</td><td>(\d+)', content)
if m:
char['level'] = int(m.group(1))
# Use database levels for online characters
for onchar in global_online_list:
if onchar.split("_", 1)[1] == char['name']:
c = userDatabase.cursor()
c.execute("SELECT last_level FROM chars WHERE name LIKE ?", (char['name'],))
result = c.fetchone()
if result:
char['level'] = abs(result["last_level"])
c.close()
break
# World
m = re.search(r'World:</td><td>([^<]+)', content)
if m:
char['world'] = m.group(1)
# Residence (City)
m = re.search(r'Residence:</td><td>([^<]+)', content)
if m:
char['residence'] = m.group(1)
# Marriage
m = re.search(r'Married To:</td><td>?.+name=([^"]+)', content)
if m:
char['married'] = urllib.parse.unquote_plus(m.group(1), encoding='ISO-8859-1')
# Sex
m = re.search(r'Sex:</td><td>([^<]+)', content)
if m:
if m.group(1) == 'male':
char['gender'] = 'male'
else:
char['gender'] = 'female'
# Guild rank
m = re.search(r'Membership:</td><td>([^<]+)\sof the', content)
if m:
char['rank'] = m.group(1)
# Guild membership
m = re.search(r'GuildName=.*?([^&]+).+', content)
if m:
char['guild'] = urllib.parse.unquote_plus(m.group(1))
# House
m = re.search(r'House:</td><td> <a href=\"https://secure\.tibia\.com/community/\?subtopic=houses.+houseid=(\d+)'
r'&character=(?:[^&]+)&action=characters\" >([^<]+)</a> \(([^(]+)\) is paid until '
r'([A-z]+).*?;(\d+).*?;(\d+)', content)
if m:
char["house_id"] = m.group(1)
char["house"] = m.group(2)
char["house_town"] = m.group(3)
# Last login
m = re.search(r'Last Login:</td><td>([^<]+)', content)
if m:
lastLogin = m.group(1).replace(" ", " ").replace(",", "")
if "never" in lastLogin:
char['last_login'] = None
else:
char['last_login'] = lastLogin
# Discord owner
c = userDatabase.cursor()
c.execute("SELECT user_id FROM chars WHERE name LIKE ?", (char["name"],))
result = c.fetchone()
char["owner_id"] = None if result is None else result["user_id"]
# Update name, vocation and world for chars in database if necessary
c = userDatabase.cursor()
c.execute("SELECT vocation, name, id, world FROM chars WHERE name LIKE ?", (name,))
result = c.fetchone()
if result:
if result["vocation"] != char['vocation']:
c.execute("UPDATE chars SET vocation = ? WHERE id = ?", (char['vocation'], result["id"],))
log.info("{0}'s vocation was set to {1} from {2} during get_character()".format(char['name'],
char['vocation'],
result["vocation"]))
if result["name"] != char["name"]:
c.execute("UPDATE chars SET name = ? WHERE id = ?", (char['name'], result["id"],))
log.info("{0} was renamed to {1} during get_character()".format(result["name"], char['name']))
if result["world"] != char["world"]:
c.execute("UPDATE chars SET world = ? WHERE id = ?", (char['world'], result["id"],))
log.info("{0}'s world was set to {1} from {2} during get_character()".format(char['name'],
char['world'],
result["world"]))
#Skills from highscores
c = userDatabase.cursor()
for category in highscores_categories:
c.execute("SELECT "+category+","+category+"_rank FROM chars WHERE name LIKE ?", (name,))
result = c.fetchone()
if result:
if result[category] is not None and result[category+'_rank'] is not None:
char[category] = result[category]
char[category+'_rank'] = result[category+'_rank']
char["deaths"] = []
regex_deaths = r'valign="top" >([^<]+)</td><td>(.+?)</td></tr>'
pattern = re.compile(regex_deaths, re.MULTILINE + re.S)
matches = re.findall(pattern, content)
for m in matches:
death_time = m[0].replace(' ', ' ').replace(",", "")
death_level = ""
death_killer = ""
death_by_player = False
if m[1].find("Died") != -1:
regex_deathinfo_monster = r'Level (\d+) by ([^.]+)'
pattern = re.compile(regex_deathinfo_monster, re.MULTILINE + re.S)
m_deathinfo_monster = re.search(pattern, m[1])
if m_deathinfo_monster:
death_level = m_deathinfo_monster.group(1)
death_killer = m_deathinfo_monster.group(2)
else:
regex_deathinfo_player = r'Level (\d+) by .+?name=([^"]+)'
pattern = re.compile(regex_deathinfo_player, re.MULTILINE + re.S)
m_deathinfo_player = re.search(pattern, m[1])
if m_deathinfo_player:
death_level = m_deathinfo_player.group(1)
death_killer = urllib.parse.unquote_plus(m_deathinfo_player.group(2))
death_by_player = True
try:
char["deaths"].append({'time': death_time, 'level': int(death_level), 'killer': death_killer,
'byPlayer': death_by_player})
except ValueError:
# Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.
continue
# Other chars
# note that an empty char list means the character is hidden
# otherwise you'd have at least the same char in the list
char['chars'] = []
try:
# See if there is a character list
startIndex = content.index("<B>Characters</B>")
content = content[startIndex:]
# Find characters
regex_chars = r'<TD WIDTH=10%><NOBR>([^<]+)[^?]+.+?VALUE=\"([^\"]+)'
pattern = re.compile(regex_chars, re.MULTILINE + re.S)
m = re.findall(pattern, content)
if m:
for (world, name) in m:
name = urllib.parse.unquote_plus(name)
char['chars'].append({'name': name, 'world': world})
except Exception:
pass
return char
def get_rashid_city() -> str:
"""Returns the city Rashid is currently in."""
offset = get_tibia_time_zone() - get_local_timezone()
# Server save is at 10am, so in tibia a new day starts at that hour
tibia_time = datetime.now() + timedelta(hours=offset - 10)
return ["Svargrond",
"Liberty Bay",
"Port Hope",
"Ankrahmun",
"Darashia",
"Edron",
"Carlin"][tibia_time.weekday()]
def get_monster(name):
"""Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy,
elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis,
arm, image."""
# Reading monster database
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM Creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%"+name+"%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
monster = result[0]
else:
return [x['title'] for x in result]
try:
if monster['health'] is None or monster['health'] < 1:
monster['health'] = None
c.execute("SELECT Items.title as name, percentage, min, max "
"FROM CreatureDrops, Items "
"WHERE Items.id = CreatureDrops.itemid AND creatureid = ? "
"ORDER BY percentage DESC",
(monster["id"],))
monster["loot"] = c.fetchall()
return monster
finally:
c.close()
def get_item(name):
"""Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.
*npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city."""
# Reading item database
c = tibiaDatabase.cursor()
# Search query
c.execute("SELECT * FROM Items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
item = result[0]
else:
return [x['title'] for x in result]
try:
# Checking if item exists
if item is not None:
# Checking NPCs that buy the item
c.execute("SELECT NPCs.title, city, value "
"FROM Items, SellItems, NPCs "
"WHERE Items.name LIKE ? AND SellItems.itemid = Items.id AND NPCs.id = vendorid "
"ORDER BY value DESC", (name,))
npcs = []
value_sell = None
for npc in c:
name = npc["title"]
city = npc["city"].title()
if value_sell is None:
value_sell = npc["value"]
elif npc["value"] != value_sell:
break
# Replacing cities for special npcs and adding colors
if name == 'Alesar' or name == 'Yaman':
city = 'Green Djinn\'s Fortress'
item["color"] = Colour.green()
elif name == 'Nah\'Bob' or name == 'Haroun':
city = 'Blue Djinn\'s Fortress'
item["color"] = Colour.blue()
elif name == 'Rashid':
city = get_rashid_city()
item["color"] = Colour(0xF0E916)
elif name == 'Yasir':
city = 'his boat'
elif name == 'Briasol':
item["color"] = Colour(0xA958C4)
npcs.append({"name": name, "city": city})
item['npcs_sold'] = npcs
item['value_sell'] = value_sell
# Checking NPCs that sell the item
c.execute("SELECT NPCs.title, city, value "
"FROM Items, BuyItems, NPCs "
"WHERE Items.name LIKE ? AND BuyItems.itemid = Items.id AND NPCs.id = vendorid "
"ORDER BY value ASC", (name,))
npcs = []
value_buy = None
for npc in c:
name = npc["title"]
city = npc["city"].title()
if value_buy is None:
value_buy = npc["value"]
elif npc["value"] != value_buy:
break
# Replacing cities for special npcs
if name == 'Alesar' or name == 'Yaman':
city = 'Green Djinn\'s Fortress'
elif name == 'Nah\'Bob' or name == 'Haroun':
city = 'Blue Djinn\'s Fortress'
elif name == 'Rashid':
offset = get_tibia_time_zone() - get_local_timezone()
# Server save is at 10am, so in tibia a new day starts at that hour
tibia_time = datetime.now() + timedelta(hours=offset - 10)
city = [
"Svargrond",
"Liberty Bay",
"Port Hope",
"Ankrahmun",
"Darashia",
"Edron",
"Carlin"][tibia_time.weekday()]
elif name == 'Yasir':
city = 'his boat'
npcs.append({"name": name, "city": city})
item['npcs_bought'] = npcs
item['value_buy'] = value_buy
# Get creatures that drop it
c.execute("SELECT Creatures.title as name, CreatureDrops.percentage "
"FROM CreatureDrops, Creatures "
"WHERE CreatureDrops.creatureid = Creatures.id AND CreatureDrops.itemid = ? "
"ORDER BY percentage DESC", (item["id"],))
item["dropped_by"] = c.fetchall()
# Checking quest rewards:
c.execute("SELECT Quests.title FROM Quests, QuestRewards "
"WHERE Quests.id = QuestRewards.questid and itemid = ?", (item["id"],))
quests = c.fetchall()
item["quests"] = list()
for quest in quests:
item["quests"].append(quest["title"])
return item
finally:
c.close()
return
def parse_tibia_time(tibia_time: str) -> datetime:
"""Gets a time object from a time string from tibia.com"""
tibia_time = tibia_time.replace(",","").replace(" ", " ")
# Getting local time and GMT
t = time.localtime()
u = time.gmtime(time.mktime(t))
# UTC Offset
local_utc_offset = ((timegm(t) - timegm(u)) / 60 / 60)
# Extracting timezone
tz = tibia_time[-4:].strip()
try:
# Convert time string to time object
# Removing timezone cause CEST and CET are not supported
t = datetime.strptime(tibia_time[:-4].strip(), "%b %d %Y %H:%M:%S")
except ValueError:
log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time))
return None
# Getting the offset
if tz == "CET":
utc_offset = 1
elif tz == "CEST":
utc_offset = 2
else:
log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time))
return None
# Add/subtract hours to get the real time
return t + timedelta(hours=(local_utc_offset - utc_offset))
def get_stats(level: int, vocation: str):
"""Returns a dictionary with the stats for a character of a certain vocation and level.
The dictionary has the following keys: vocation, hp, mp, cap."""
try:
level = int(level)
except ValueError:
return "bad level"
if level <= 0:
return "low level"
elif level > 2000:
return "high level"
vocation = vocation.lower().strip()
if vocation in KNIGHT:
hp = (level - 8) * 15 + 185
mp = (level - 0) * 5 + 50
cap = (level - 8) * 25 + 470
vocation = "knight"
elif vocation in PALADIN:
hp = (level - 8) * 10 + 185
mp = (level - 8) * 15 + 90
cap = (level - 8) * 20 + 470
vocation = "paladin"
elif vocation in MAGE:
hp = (level - 0) * 5 + 145
mp = (level - 8) * 30 + 90
cap = (level - 0) * 10 + 390
vocation = "mage"
elif vocation in NO_VOCATION:
vocation = "no vocation"
else:
return "bad vocation"
if level < 8 or vocation == "no vocation":
hp = (level - 0) * 5 + 145
mp = (level - 0) * 5 + 50
cap = (level - 0) * 10 + 390
exp = (50*pow(level, 3)/3) - 100*pow(level, 2) + (850*level/3) - 200
exp_tnl = 50*level*level - 150 * level + 200
return {"vocation": vocation, "hp": hp, "mp": mp, "cap": cap, "exp": int(exp), "exp_tnl": exp_tnl}
def get_share_range(level: int):
"""Returns the share range for a specific level
The returned value is a list with the lower limit and the upper limit in that order."""
return int(round(level * 2 / 3, 0)), int(round(level * 3 / 2, 0))
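# e.g. get_share_range(100) returns (67, 150): a level 100 character shares experience with levels 67-150.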
# TODO: Improve formatting to match /monster and /item
def get_spell(name):
"""Returns a dictionary containing a spell's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
c.execute("""SELECT * FROM Spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15""",
("%" + name + "%", "%" + name + "%"))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or result[0]["words"].lower() == name.lower() or len(result) == 1:
spell = result[0]
else:
return ["{name} ({words})".format(**x) for x in result]
spell["npcs"] = []
c.execute("""SELECT NPCs.title as name, NPCs.city, SpellNPCs.knight, SpellNPCs.paladin,
SpellNPCs.sorcerer, SpellNPCs.druid FROM NPCs, SpellNPCs
WHERE SpellNPCs.spellid = ? AND SpellNPCs.npcid = NPCs.id""", (spell["id"],))
result = c.fetchall()
for npc in result:
npc["city"] = npc["city"].title()
spell["npcs"].append(npc)
return spell
finally:
c.close()
def get_npc(name):
"""Returns a dictionary containing a NPC's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
# search query
c.execute("SELECT * FROM NPCs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower or len(result) == 1:
npc = result[0]
else:
return [x["title"] for x in result]
npc["image"] = 0
c.execute("SELECT Items.name, Items.category, BuyItems.value FROM BuyItems, Items "
"WHERE Items.id = BuyItems.itemid AND BuyItems.vendorid = ?", (npc["id"],))
npc["sell_items"] = c.fetchall()
c.execute("SELECT Items.name, Items.category, SellItems.value FROM SellItems, Items "
"WHERE Items.id = SellItems.itemid AND SellItems.vendorid = ?", (npc["id"],))
npc["buy_items"] = c.fetchall()
return npc
finally:
c.close()
@asyncio.coroutine
def get_house(name, world = None):
"""Returns a dictionary containing a house's info, a list of possible matches or None.
If world is specified, it will also find the current status of the house in that world."""
c = tibiaDatabase.cursor()
try:
# Search query
c.execute("SELECT * FROM Houses WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
house = result[0]
else:
return [x['name'] for x in result]
if world is None or world not in tibia_worlds:
house["fetch"] = False
return house
house["world"] = world
house["url"] = url_house.format(id=house["id"], world=world)
tries = 5
while True:
try:
page = yield from aiohttp.get(house["url"])
content = yield from page.text(encoding='ISO-8859-1')
except Exception:
if tries == 0:
log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
house["id"],
world))
house["fetch"] = False
break
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
continue
# Trimming content to reduce load
try:
start_index = content.index("\"BoxContent\"")
end_index = content.index("</TD></TR></TABLE>")
content = content[start_index:end_index]
except ValueError:
if tries == 0:
log.error("get_house: Couldn't fetch {0} (id {1}) in {2}, network error.".format(house["name"],
house["id"],
world))
house["fetch"] = False
break
else:
tries -= 1
yield from asyncio.sleep(network_retry_delay)
continue
house["fetch"] = True
m = re.search(r'monthly rent is <B>(\d+)', content)
if m:
house['rent'] = int(m.group(1))
if "rented" in content:
house["status"] = "rented"
m = re.search(r'rented by <A?.+name=([^\"]+).+e has paid the rent until <B>([^<]+)</B>', content)
if m:
house["owner"] = urllib.parse.unquote_plus(m.group(1))
house["until"] = m.group(2).replace(" ", " ")
if "move out" in content:
house["status"] = "transferred"
m = re.search(r'will move out on <B>([^<]+)</B> \(time of daily server save\) and will pass the '
r'house to <A.+name=([^\"]+).+ for <B>(\d+) gold', content)
if m:
house["transfer_date"] =house["until"] = m.group(1).replace(" ", " ")
house["transferee"] = urllib.parse.unquote_plus(m.group(2))
house["transfer_price"] = int(m.group(3))
elif "auctioned" in content:
house["status"] = "auctioned"
if ". No bid has" in content:
house["status"] = "empty"
break
m = re.search(r'The auction will end at <B>([^\<]+)</B>\. '
r'The highest bid so far is <B>(\d+).+ by .+name=([^\"]+)\"', content)
if m:
house["auction_end"] = m.group(1).replace(" ", " ")
house["top_bid"] = int(m.group(2))
house["top_bidder"] = urllib.parse.unquote_plus(m.group(3))
break
return house
finally:
c.close()
def get_achievement(name):
"""Returns an achievement (dictionary), a list of possible matches or none"""
c = tibiaDatabase.cursor()
try:
# Search query
c.execute("SELECT * FROM Achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
return result[0]
else:
return [x['name'] for x in result]
finally:
c.close()
def get_tibia_time_zone() -> int:
"""Returns Germany's timezone, considering their daylight saving time dates"""
# Find date in Germany
gt = datetime.utcnow() + timedelta(hours=1)
germany_date = date(gt.year, gt.month, gt.day)
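    # DST in Germany runs from the last Sunday of March to the last Sunday of October;
    # 31 - ((5*year/4 + 4) % 7) and 31 - ((5*year/4 + 1) % 7) give those Sundays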
dst_start = date(gt.year, 3, (31 - (int(((5 * gt.year) / 4) + 4) % int(7))))
dst_end = date(gt.year, 10, (31 - (int(((5 * gt.year) / 4) + 1) % int(7))))
if dst_start < germany_date < dst_end:
return 2
return 1
def get_voc_abb(vocation: str) -> str:
"""Given a vocation name, it returns an abbreviated string"""
abbrev = {'none': 'N', 'druid': 'D', 'sorcerer': 'S', 'paladin': 'P', 'knight': 'K', 'elder druid': 'ED',
'master sorcerer': 'MS', 'royal paladin': 'RP', 'elite knight': 'EK'}
try:
return abbrev[vocation.lower()]
except KeyError:
return 'N'
def get_voc_emoji(vocation: str) -> str:
"""Given a vocation name, returns a emoji representing it"""
emoji = {'none': EMOJI[":hatching_chick:"], 'druid': EMOJI[":snowflake:"], 'sorcerer': EMOJI[":flame:"], 'paladin': EMOJI[":archery:"],
'knight': EMOJI[":shield:"], 'elder druid': EMOJI[":snowflake:"],
'master sorcerer': EMOJI[":flame:"], 'royal paladin': EMOJI[":archery:"],
'elite knight': EMOJI[":shield:"]}
try:
return emoji[vocation.lower()]
except KeyError:
return EMOJI[":question:"]
def get_pronouns(gender: str):
"""Gets a list of pronouns based on the gender given. Only binary genders supported, sorry."""
gender = gender.lower()
if gender == "female":
pronoun = ["she", "her", "her"]
elif gender == "male":
pronoun = ["he", "his", "him"]
else:
pronoun = ["it", "its", "it"]
return pronoun
def get_map_area(x, y, z, size=15, scale=8, crosshair=True):
"""Gets a minimap picture of a map area
size refers to the radius of the image in actual tibia sqm
scale is how much the image will be streched (1 = 1 sqm = 1 pixel)"""
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM WorldMap WHERE z LIKE ?", (z,))
result = c.fetchone()
im = Image.open(io.BytesIO(bytearray(result['image'])))
im = im.crop((x-size, y-size, x+size, y+size))
im = im.resize((size*scale, size*scale))
if crosshair:
draw = ImageDraw.Draw(im)
width, height = im.size
draw.line((0, height/2, width, height/2), fill=128)
draw.line((width/2, 0, width/2, height), fill=128)
img_byte_arr = io.BytesIO()
im.save(img_byte_arr, format='png')
img_byte_arr = img_byte_arr.getvalue()
return img_byte_arr
| 43.123762 | 225 | 0.536678 | [
"Apache-2.0"
] | LadyKeladry/Guardian-Bot | NabBot-master/utils/tibia.py | 43,555 | Python |
#!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension("freenect", ["freenect.pyx"],
libraries=['usb-1.0', 'freenect', 'freenect_sync'],
runtime_library_dirs=['/usr/local/lib', '/usr/local/lib64', '/usr/lib/'],
extra_compile_args=['-fPIC', '-I', '../../include/',
'-I', '/usr/include/libusb-1.0/',
'-I', '/usr/local/include/libusb-1.0',
'-I', '/usr/local/include',
'-I', '../c_sync/'])]
setup(
name = 'freenect',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
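# Typical build command, run from this directory: python setup.py build_ext --inplace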
| 44.526316 | 98 | 0.471631 | [
"MIT"
] | HoEmpire/slambook2 | 3rdparty/meshlab-master/src/external/openkinect/wrappers/python/setup.py | 846 | Python |
from __future__ import division
import argparse
import os
import torch
from mmcv import Config
from mmdet import __version__
from mmdet.apis import (get_root_logger, init_dist, set_random_seed,
train_detector)
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument(
'--resume_from', help='the checkpoint file to resume from')
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
parser.add_argument(
'--gpus',
type=int,
default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# update configs according to CLI args
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# init logger before other steps
logger = get_root_logger(cfg.log_level)
logger.info('Distributed training: {}'.format(distributed))
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}'.format(args.seed))
set_random_seed(args.seed)
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
train_dataset = build_dataset(cfg.data.train)
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__,
config=cfg.text,
CLASSES=train_dataset.CLASSES)
# add an attribute for visualization convenience
model.CLASSES = train_dataset.CLASSES
train_detector(
model,
train_dataset,
cfg,
distributed=distributed,
validate=args.validate,
logger=logger)
if __name__ == '__main__':
main()
| 31.570093 | 77 | 0.657194 | [
"Apache-2.0"
] | GioPais/ttfnet | tools/train.py | 3,378 | Python |
"""An example of jinja2 templating"""
from bareasgi import Application, HttpRequest, HttpResponse
import jinja2
import pkg_resources
import uvicorn
from bareasgi_jinja2 import Jinja2TemplateProvider, add_jinja2
async def http_request_handler(request: HttpRequest) -> HttpResponse:
"""Handle the request"""
return await Jinja2TemplateProvider.apply(
request,
'example1.html',
{'name': 'rob'}
)
async def handle_no_template(request: HttpRequest) -> HttpResponse:
"""This is what happens if there is no template"""
return await Jinja2TemplateProvider.apply(
request,
'notemplate.html',
{'name': 'rob'}
)
if __name__ == '__main__':
TEMPLATES = pkg_resources.resource_filename(__name__, "templates")
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(TEMPLATES),
autoescape=jinja2.select_autoescape(['html', 'xml']),
enable_async=True
)
app = Application()
add_jinja2(app, env)
app.http_router.add({'GET'}, '/example1', http_request_handler)
app.http_router.add({'GET'}, '/notemplate', handle_no_template)
uvicorn.run(app, port=9010)
| 26.111111 | 70 | 0.689362 | [
"Apache-2.0"
] | rob-blackbourn/bareASGI-jinja2 | examples/example1.py | 1,175 | Python |
from __future__ import print_function
import pprint
import os
import time
import msgpackrpc
import math
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import msgpack
import sys
import inspect
import types
import re
import shutil
import numpy as np #pip install numpy
#==============================================================================
# Classes
#==============================================================================
class MsgpackMixin:
def to_msgpack(self, *args, **kwargs):
return self.__dict__ #msgpack.dump(self.to_dict(*args, **kwargs))
@classmethod
def from_msgpack(cls, encoded):
obj = cls()
obj.__dict__ = {k.decode('utf-8'): v for k, v in encoded.items()}
return obj
class AirSimImageType:
Scene = 0
DepthPlanner = 1
DepthPerspective = 2
DepthVis = 3
DisparityNormalized = 4
Segmentation = 5
SurfaceNormals = 6
class DrivetrainType:
MaxDegreeOfFreedom = 0
ForwardOnly = 1
class LandedState:
Landed = 0
Flying = 1
class Vector3r(MsgpackMixin):
x_val = np.float32(0)
y_val = np.float32(0)
z_val = np.float32(0)
def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0)):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
class Quaternionr(MsgpackMixin):
w_val = np.float32(0)
x_val = np.float32(0)
y_val = np.float32(0)
z_val = np.float32(0)
def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0), w_val = np.float32(1)):
self.x_val = x_val
self.y_val = y_val
self.z_val = z_val
self.w_val = w_val
class Pose(MsgpackMixin):
position = Vector3r()
orientation = Quaternionr()
def __init__(self, position_val, orientation_val):
self.position = position_val
self.orientation = orientation_val
class CollisionInfo(MsgpackMixin):
has_collided = False
normal = Vector3r()
impact_point = Vector3r()
position = Vector3r()
penetration_depth = np.float32(0)
time_stamp = np.float32(0)
object_name = ""
object_id = -1
class GeoPoint(MsgpackMixin):
latitude = 0.0
longitude = 0.0
altitude = 0.0
class YawMode(MsgpackMixin):
is_rate = True
yaw_or_rate = 0.0
def __init__(self, is_rate = True, yaw_or_rate = 0.0):
self.is_rate = is_rate
self.yaw_or_rate = yaw_or_rate
class ImageRequest(MsgpackMixin):
camera_id = np.uint8(0)
image_type = AirSimImageType.Scene
pixels_as_float = False
compress = False
def __init__(self, camera_id, image_type, pixels_as_float = False, compress = True):
self.camera_id = camera_id
self.image_type = image_type
self.pixels_as_float = pixels_as_float
self.compress = compress
class ImageResponse(MsgpackMixin):
image_data_uint8 = np.uint8(0)
image_data_float = np.float32(0)
camera_position = Vector3r()
camera_orientation = Quaternionr()
time_stamp = np.uint64(0)
message = ''
pixels_as_float = np.float32(0)
compress = True
width = 0
height = 0
image_type = AirSimImageType.Scene
class CarControls(MsgpackMixin):
throttle = np.float32(0)
steering = np.float32(0)
brake = np.float32(0)
handbrake = False
is_manual_gear = False
manual_gear = 0
gear_immediate = True
    def set_throttle(self, throttle_val, forward):
        # assign to the instance attributes; plain local assignments here would have no effect
        if (forward):
            self.is_manual_gear = False
            self.manual_gear = 0
            self.throttle = abs(throttle_val)
        else:
            self.is_manual_gear = False
            self.manual_gear = -1
            self.throttle = - abs(throttle_val)
class CarState(MsgpackMixin):
speed = np.float32(0)
gear = 0
position = Vector3r()
velocity = Vector3r()
orientation = Quaternionr()
class AirSimClientBase:
def __init__(self, ip, port):
self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = 3600)
def ping(self):
return self.client.call('ping')
def reset(self):
self.client.call('reset')
def confirmConnection(self):
print('Waiting for connection: ', end='')
home = self.getHomeGeoPoint()
while ((home.latitude == 0 and home.longitude == 0 and home.altitude == 0) or
math.isnan(home.latitude) or math.isnan(home.longitude) or math.isnan(home.altitude)):
time.sleep(1)
home = self.getHomeGeoPoint()
print('X', end='')
print('')
def getHomeGeoPoint(self):
return GeoPoint.from_msgpack(self.client.call('getHomeGeoPoint'))
# basic flight control
def enableApiControl(self, is_enabled):
return self.client.call('enableApiControl', is_enabled)
def isApiControlEnabled(self):
return self.client.call('isApiControlEnabled')
def simSetSegmentationObjectID(self, mesh_name, object_id, is_name_regex = False):
return self.client.call('simSetSegmentationObjectID', mesh_name, object_id, is_name_regex)
def simGetSegmentationObjectID(self, mesh_name):
return self.client.call('simGetSegmentationObjectID', mesh_name)
# camera control
# simGetImage returns compressed png in array of bytes
# image_type uses one of the AirSimImageType members
def simGetImage(self, camera_id, image_type):
# because this method returns std::vector<uint8>, msgpack decides to encode it as a string unfortunately.
result = self.client.call('simGetImage', camera_id, image_type)
if (result == "" or result == "\0"):
return None
return result
# camera control
# simGetImage returns compressed png in array of bytes
# image_type uses one of the AirSimImageType members
def simGetImages(self, requests):
responses_raw = self.client.call('simGetImages', requests)
return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
def getCollisionInfo(self):
return CollisionInfo.from_msgpack(self.client.call('getCollisionInfo'))
@staticmethod
def stringToUint8Array(bstr):
return np.fromstring(bstr, np.uint8)
@staticmethod
def stringToFloatArray(bstr):
return np.fromstring(bstr, np.float32)
@staticmethod
def listTo2DFloatArray(flst, width, height):
return np.reshape(np.asarray(flst, np.float32), (height, width))
@staticmethod
def getPfmArray(response):
return AirSimClientBase.listTo2DFloatArray(response.image_data_float, response.width, response.height)
@staticmethod
def get_public_fields(obj):
return [attr for attr in dir(obj)
if not (attr.startswith("_")
or inspect.isbuiltin(attr)
or inspect.isfunction(attr)
or inspect.ismethod(attr))]
@staticmethod
def to_dict(obj):
return dict([attr, getattr(obj, attr)] for attr in AirSimClientBase.get_public_fields(obj))
@staticmethod
def to_str(obj):
return str(AirSimClientBase.to_dict(obj))
@staticmethod
def write_file(filename, bstr):
with open(filename, 'wb') as afile:
afile.write(bstr)
def simSetPose(self, pose, ignore_collison):
self.client.call('simSetPose', pose, ignore_collison)
def simGetPose(self):
return self.client.call('simGetPose')
# helper method for converting getOrientation to roll/pitch/yaw
# https:#en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
@staticmethod
def toEulerianAngle(q):
z = q.z_val
y = q.y_val
x = q.x_val
w = q.w_val
ysqr = y * y
# roll (x-axis rotation)
t0 = +2.0 * (w*x + y*z)
t1 = +1.0 - 2.0*(x*x + ysqr)
roll = math.atan2(t0, t1)
# pitch (y-axis rotation)
t2 = +2.0 * (w*y - z*x)
if (t2 > 1.0):
t2 = 1
if (t2 < -1.0):
t2 = -1.0
pitch = math.asin(t2)
# yaw (z-axis rotation)
t3 = +2.0 * (w*z + x*y)
t4 = +1.0 - 2.0 * (ysqr + z*z)
yaw = math.atan2(t3, t4)
return (pitch, roll, yaw)
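    # toEulerianAngle returns angles in radians, ordered (pitch, roll, yaw)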
@staticmethod
def toQuaternion(pitch, roll, yaw):
t0 = math.cos(yaw * 0.5)
t1 = math.sin(yaw * 0.5)
t2 = math.cos(roll * 0.5)
t3 = math.sin(roll * 0.5)
t4 = math.cos(pitch * 0.5)
t5 = math.sin(pitch * 0.5)
q = Quaternionr()
q.w_val = t0 * t2 * t4 + t1 * t3 * t5 #w
q.x_val = t0 * t3 * t4 - t1 * t2 * t5 #x
q.y_val = t0 * t2 * t5 + t1 * t3 * t4 #y
q.z_val = t1 * t2 * t4 - t0 * t3 * t5 #z
return q
@staticmethod
def wait_key(message = ''):
''' Wait for a key press on the console and return it. '''
if message != '':
print (message)
result = None
if os.name == 'nt':
import msvcrt
result = msvcrt.getch()
else:
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result
@staticmethod
def read_pfm(file):
""" Read a pfm file """
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
header = str(bytes.decode(header, encoding='utf-8'))
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))
dim_match = re.match(r'^(\d+)\s(\d+)\s$', temp_str)
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
# DEY: I don't know why this was there.
#data = np.flipud(data)
file.close()
return data, scale
@staticmethod
def write_pfm(file, image, scale=1):
""" Write a pfm file """
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
temp_str = '%d %d\n' % (image.shape[1], image.shape[0])
file.write(temp_str.encode('utf-8'))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
temp_str = '%f\n' % scale
file.write(temp_str.encode('utf-8'))
image.tofile(file)
@staticmethod
def write_png(filename, image):
""" image must be numpy array H X W X channels
"""
import zlib, struct
buf = image.flatten().tobytes()
width = image.shape[1]
height = image.shape[0]
# reverse the vertical line order and add null bytes at the start
width_byte_4 = width * 4
raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4]
for span in range((height - 1) * width_byte_4, -1, - width_byte_4))
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack("!I", len(data)) +
chunk_head +
struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
png_bytes = b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
AirSimClientBase.write_file(filename, png_bytes)
# ----------------------------------- Multirotor APIs ---------------------------------------------
class MultirotorClient(AirSimClientBase, object):
def __init__(self, ip = ""):
if (ip == ""):
ip = "127.0.0.1"
super(MultirotorClient, self).__init__(ip, 41451)
def armDisarm(self, arm):
return self.client.call('armDisarm', arm)
def takeoff(self, max_wait_seconds = 15):
return self.client.call('takeoff', max_wait_seconds)
def land(self, max_wait_seconds = 60):
return self.client.call('land', max_wait_seconds)
def goHome(self):
return self.client.call('goHome')
def hover(self):
return self.client.call('hover')
# query vehicle state
def getPosition(self):
return Vector3r.from_msgpack(self.client.call('getPosition'))
def getVelocity(self):
return Vector3r.from_msgpack(self.client.call('getVelocity'))
def getOrientation(self):
return Quaternionr.from_msgpack(self.client.call('getOrientation'))
def getLandedState(self):
return self.client.call('getLandedState')
def getGpsLocation(self):
return GeoPoint.from_msgpack(self.client.call('getGpsLocation'))
def getPitchRollYaw(self):
return self.toEulerianAngle(self.getOrientation())
#def getRCData(self):
# return self.client.call('getRCData')
def timestampNow(self):
return self.client.call('timestampNow')
def isApiControlEnabled(self):
return self.client.call('isApiControlEnabled')
def isSimulationMode(self):
return self.client.call('isSimulationMode')
def getServerDebugInfo(self):
return self.client.call('getServerDebugInfo')
# APIs for control
def moveByAngle(self, pitch, roll, z, yaw, duration):
return self.client.call('moveByAngle', pitch, roll, z, yaw, duration)
def moveByVelocity(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
return self.client.call('moveByVelocity', vx, vy, vz, duration, drivetrain, yaw_mode)
def moveByVelocityZ(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
return self.client.call('moveByVelocityZ', vx, vy, z, duration, drivetrain, yaw_mode)
def moveOnPath(self, path, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
return self.client.call('moveOnPath', path, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)
def moveToZ(self, z, velocity, max_wait_seconds = 60, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
return self.client.call('moveToZ', z, velocity, max_wait_seconds, yaw_mode, lookahead, adaptive_lookahead)
def moveToPosition(self, x, y, z, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):
return self.client.call('moveToPosition', x, y, z, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)
def moveByManual(self, vx_max, vy_max, z_min, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):
return self.client.call('moveByManual', vx_max, vy_max, z_min, duration, drivetrain, yaw_mode)
def rotateToYaw(self, yaw, max_wait_seconds = 60, margin = 5):
return self.client.call('rotateToYaw', yaw, max_wait_seconds, margin)
def rotateByYawRate(self, yaw_rate, duration):
return self.client.call('rotateByYawRate', yaw_rate, duration)
# ----------------------------------- Car APIs ---------------------------------------------
class CarClient(AirSimClientBase, object):
def __init__(self, ip = ""):
if (ip == ""):
ip = "127.0.0.1"
super(CarClient, self).__init__(ip, 42451)
def setCarControls(self, controls):
self.client.call('setCarControls', controls)
def getCarState(self):
state_raw = self.client.call('getCarState')
return CarState.from_msgpack(state_raw)
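    # Note: CarClient (and MultirotorClient above) are thin msgpack-rpc wrappers: every
    # method is a blocking client.call() into the simulator (port 42451 for cars, 41451
    # for multirotors), and structured results such as CarState are decoded via their
    # from_msgpack classmethods.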
# FIXME: keep this part and remove everything above that is already duplicated from AirSimClient.py
#==============================================================================
# Functions
#==============================================================================
def drive(client, throttle, steering):
car_controls.throttle = throttle
car_controls.steering = steering
client.setCarControls(car_controls)
def drive_forward(client, car_controls):
drive(client, 1.0, 0)
def drive_right(client, car_controls):
drive(client, 1.0, 10)
def drive_left(client, car_controls):
drive(client, 1.0, -10)
def save_image(i):
    # get a single image from the car's camera
responses = client.simGetImages([ImageRequest(1, AirSimImageType.Scene)])
single_image = responses[0].image_data_uint8
# save the image
AirSimClientBase.write_file(os.path.normpath(IMAGEDIR + \
'/image_{}.png'.format(i)), single_image)
#==============================================================================
# Main
#==============================================================================
# Constants
IMAGEDIR = "images"
# Create an empty image directory
try:
shutil.rmtree(IMAGEDIR, ignore_errors=True)
os.stat(IMAGEDIR)
except:
os.mkdir(IMAGEDIR)
# Connect to AirSim
client = CarClient()
client.confirmConnection()
client.enableApiControl(True)
client.reset()
print('Connected')
i = 0
car_controls = CarControls()
while True:
drive_forward(client, car_controls)
i += 1
save_image(i)
print("image {} has been saved".format(i))
time.sleep(0.1)
drive_right(client, car_controls)
i += 1
save_image(i)
print("image {} has been saved".format(i))
time.sleep(0.1)
drive_forward(client, car_controls)
i += 1
save_image(i)
print("image {} has been saved".format(i))
time.sleep(0.1)
drive_left(client, car_controls)
i += 1
save_image(i)
print("image {} has been saved".format(i))
time.sleep(0.1)
if i >= 40:
break
## get RGBA camera images from the car
#responses = client.simGetImages([ImageRequest(1, AirSimImageType.Scene)])
## add image to queue
#imagequeue.append(responses[0].image_data_uint8)
## dump queue when it gets full
#if len(imagequeue) == QUEUESIZE:
# for i in range(QUEUESIZE):
# AirSimClientBase.write_file(os.path.normpath(IMAGEDIR + \
# '/image%03d.png' % i ), imagequeue[i])
# imagequeue.pop(0)
#collision_info = client.getCollisionInfo()
#if collision_info.has_collided:
# print("Collision at pos %s, normal %s, impact pt %s, penetration %f, name %s, obj id %d" % (
# pprint.pformat(collision_info.position),
# pprint.pformat(collision_info.normal),
# pprint.pformat(collision_info.impact_point),
# collision_info.penetration_depth, collision_info.object_name, collision_info.object_id))
# break
#time.sleep(0.1)
client.enableApiControl(False)
| 31.491499 | 181 | 0.599362 | [
"MIT"
] | ybettan/AirSimTensorFlow | run_demo.py | 20,375 | Python |
#!/usr/bin/env python
# coding: utf-8
import logging.config
import os
# Database configuration
DB_CONFIG = {
'username': 'root',
'password': os.environ.get('MYSQL_TRADING_PASS'),
'host': '127.0.0.1',
'dbname': 'trading_db',
}
# Logging configuration
LOGGING = {
'version': 1,
    'formatters': { # Message formatting
'main': {
'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
    'handlers': { # Message handlers
'file_handler': {
'class': 'logging.FileHandler',
'filename': '/tmp/trading.log',
'formatter': 'main',
},
'streamlogger': {
'class': 'logging.StreamHandler',
'formatter': 'main',
},
},
    'loggers': { # Loggers
'prod_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'INFO',
},
'devel_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'DEBUG',
},
},
}
logging.config.dictConfig(LOGGING)
# Base configuration
class Config(object):
DEBUG = False
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{DB_CONFIG['dbname']}?charset=utf8"
SQLALCHEMY_TRACK_MODIFICATIONS = False
LOGGER_NAME = 'devel_logger'
MAIL_SERVER = 'smtp.yandex.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
CELERY_BROKER_URL = 'redis://0.0.0.0:6379/'
CELERY_RESULT_BACKEND = 'redis://0.0.0.0:6379/'
CELERY_DEFAULT_QUEUE = 'request_handler_queue'
# Production configuration
class ProductionConfig(Config):
DEBUG = False
LOGGER_NAME = 'prod_logger'
# Development configuration
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
LOGGER_NAME = 'devel_logger'
# Testing configuration
class TestConfig(Config):
DEBUG = True
TESTING = True
WTF_CSRF_ENABLED = False
LOGGER_NAME = 'devel_logger'
test_db_name = "test_trading_db"
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{test_db_name}?charset=utf8"
# Current (active) configuration
# --------------------------------------------------
_currentConfig = DevelopmentConfig
def getConfig():
return _currentConfig
def setConfig(config):
global _currentConfig
_currentConfig = config
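# Illustrative usage (assumes a Flask-style app object, which is not defined in this module):
#   setConfig(TestConfig)
#   app.config.from_object(getConfig())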
# --------------------------------------------------
# Size of the data buffer (chunk) loaded into the database
chunkSize = 30000
| 25.637168 | 98 | 0.593027 | [
"Apache-2.0"
] | AsAsgard/trading_pr | request_handler/appconfig.py | 3,134 | Python |
#!/usr/bin/env python3
import random
import sys
"""
Markov chains name generator in Python
From http://roguebasin.roguelikedevelopment.org/index.php?title=Markov_chains_name_generator_in_Python .
"""
# from http://www.geocities.com/anvrill/names/cc_goth.html
PLACES = ['Adara', 'Adena', 'Adrianne', 'Alarice', 'Alvita', 'Amara', 'Ambika', 'Antonia', 'Araceli', 'Balandria', 'Basha',
'Beryl', 'Bryn', 'Callia', 'Caryssa', 'Cassandra', 'Casondrah', 'Chatha', 'Ciara', 'Cynara', 'Cytheria', 'Dabria', 'Darcei',
'Deandra', 'Deirdre', 'Delores', 'Desdomna', 'Devi', 'Dominique', 'Drucilla', 'Duvessa', 'Ebony', 'Fantine', 'Fuscienne',
'Gabi', 'Gallia', 'Hanna', 'Hedda', 'Jerica', 'Jetta', 'Joby', 'Kacila', 'Kagami', 'Kala', 'Kallie', 'Keelia', 'Kerry',
'Kerry-Ann', 'Kimberly', 'Killian', 'Kory', 'Lilith', 'Lucretia', 'Lysha', 'Mercedes', 'Mia', 'Maura', 'Perdita', 'Quella',
'Riona', 'Safiya', 'Salina', 'Severin', 'Sidonia', 'Sirena', 'Solita', 'Tempest', 'Thea', 'Treva', 'Trista', 'Vala', 'Winta']
###############################################################################
# Markov Name model
# A random name generator, by Peter Corbett
# http://www.pick.ucam.org/~ptc24/mchain.html
# This script is hereby entered into the public domain
###############################################################################
class Mdict:
def __init__(self):
self.d = {}
def __getitem__(self, key):
if key in self.d:
return self.d[key]
else:
raise KeyError(key)
def add_key(self, prefix, suffix):
if prefix in self.d:
self.d[prefix].append(suffix)
else:
self.d[prefix] = [suffix]
def get_suffix(self,prefix):
l = self[prefix]
return random.choice(l)
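# Note: Mdict maps every fixed-length letter prefix seen in the source names to the list of
# letters that followed it (duplicates are kept, so common continuations are sampled more
# often). MName below replays that table: starting from an all-space prefix, it keeps
# drawing the next letter until the end-of-name marker "\n" appears or 10 letters are reached.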
class MName:
"""
A name from a Markov chain
"""
def __init__(self, chainlen = 2):
"""
Building the dictionary
"""
if chainlen > 10 or chainlen < 1:
print("Chain length must be between 1 and 10, inclusive")
sys.exit(0)
self.mcd = Mdict()
oldnames = []
self.chainlen = chainlen
for l in PLACES:
l = l.strip()
oldnames.append(l)
s = " " * chainlen + l
for n in range(0,len(l)):
self.mcd.add_key(s[n:n+chainlen], s[n+chainlen])
self.mcd.add_key(s[len(l):len(l)+chainlen], "\n")
def New(self):
"""
New name from the Markov chain
"""
prefix = " " * self.chainlen
name = ""
suffix = ""
while True:
suffix = self.mcd.get_suffix(prefix)
if suffix == "\n" or len(name) > 9:
break
else:
name = name + suffix
prefix = prefix[1:] + suffix
return name.capitalize()
#############################################################################
if __name__ == "__main__":
li = []
for i in range(10):
li.append(MName().New())
for e in sorted(li):
print(e.lower())
| 34.288889 | 125 | 0.515554 | [
"MIT"
] | doc22940/Bash-Utils | lib/markov_usernames.py | 3,086 | Python |
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('security/', auth_views.PasswordChangeView.as_view(), name='security_settings'),
path('security/done/', auth_views.PasswordChangeDoneView.as_view(), name='password_change_done'),
path('account/', views.profile_settings, name='profile_settings'),
path('edit/', views.edit_profile, name='edit_profile'), #remove this later
] | 42 | 101 | 0.755411 | [
"MIT"
] | nathanielCherian/socSite | soc_site/usettings/urls.py | 462 | Python |
#!/usr/bin/python
#By Sun Jinyuan and Cui Yinglu, 2021
foldx_exe = "/user/sunjinyuan/soft/foldx"
def getparser():
parser = argparse.ArgumentParser(description=
'To run Foldx PositionScan with multiple threads, make sure' +
                                     ' that you have the foldx binary and your pdb in the same folder')
parser.add_argument("-s", '--pdbfile', help="The pdb file, the repaired one")
parser.add_argument("-nt", '--number_threads', help="How many threads to run the Foldx")
parser.add_argument("-c", '--chain_id', help="Chain ID")
args = parser.parse_args()
return args
def SOfile2mutlist(pdbname, chain_id, foldx_exe):
AA_list = ["Q", "W", "E", "R", "T", "Y", "I", "P", "A", "S", "D", "F", "G", "H", "K", "L", "V", "N", "M"]
try:
SO_file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
except FileNotFoundError:
os.system(foldx_exe + " --command=SequenceOnly --pdb=" + pdbname)
#os.system("/data/home/jsun/mhetase/FoldX/foldx5 --command=SequenceOnly --pdb=" + pdbname)
SO_file = open("SO_" + pdbname.replace("pdb", "fxout"), "r")
mut_lst = []
for line in SO_file:
lst = line.replace("\n", "").split("\t")
if len(lst) > 3:
if lst[1] == chain_id:
wild_AA = lst[3][0]
for AA in AA_list:
if AA != wild_AA:
mut_lst.append(lst[3] + AA + ";")
return mut_lst
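# Note: the mutation strings collected above follow FoldX's individual_list.txt convention,
# e.g. "KA12G;" = wild-type K at position 12 of chain A mutated to G; one entry is produced
# per position for every residue in AA_list other than the wild type (note that AA_list
# omits cysteine).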
def multi_threads(mut_lst, threads, pdbname, foldx_exe):
t = len(mut_lst) // (int(threads) - 1)
n = 0
for i in range(0, len(mut_lst), t):
submutlst = mut_lst[i:i + t]
n = n + 1
# indi_lst_name = "individual_list_"+str(n)+"_.txt"
sub_dir_name = "Subdirectory" + str(n)
indi_lst_name = sub_dir_name + "/individual_list.txt"
os.mkdir(sub_dir_name)
os.system("cp " + pdbname + " " + sub_dir_name)
with open(indi_lst_name, "w+") as ind_lst:
for mut in submutlst:
ind_lst.write(mut + "\n")
ind_lst.close()
readablefilename = sub_dir_name + "/List_Mutations_readable.txt"
with open(readablefilename, "a+") as readablefile:
# KA12G
x = 1
for mut in submutlst:
readablefile.write(str(x)+" "+mut[0]+" "+mut[2:-2]+" "+mut[-2]+"\n")
#readablefile.write(str(x) + " " + mut[0] + " " + mut[2:-1] + " " + mut[-1] + "\n")
x += 1
readablefile.close()
cfg = "command=BuildModel\npdb=" + pdbname + "\nmutant-file=individual_list.txt\nnumberOfRuns=5"
cfg_name = sub_dir_name + "/BM_" + str(n) + ".cfg"
with open(cfg_name, "w+") as cfg_file:
cfg_file.write(cfg)
cfg_file.close()
with open("todo_list.sh", "a+") as todo_file:
todo_file.write("cd " + sub_dir_name + "\n")
todo_file.write("nohup "+foldx_exe+" -f " + "BM_" + str(n) + ".cfg" + " &\n")
todo_file.write("cd ..\n")
todo_file.close()
if __name__ == "__main__":
import os
import argparse
args = getparser()
pdbname = args.pdbfile
threads = args.number_threads
chain_id = args.chain_id
#print(foldx_exe)
with open("todo_list.sh", "w+") as todo_file:
todo_file.close()
mut_lst = SOfile2mutlist(pdbname, chain_id, foldx_exe)
multi_threads(mut_lst, threads, pdbname, foldx_exe)
| 36.489583 | 109 | 0.549814 | [
"MIT"
] | JinyuanSun/my_bio_script | RFACA/foldx/foldx_scan.py | 3,503 | Python |
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from oslo_config import cfg
from oslo_log import log
import copy
import time
from conductor import service
# from conductor.solver.optimizer import decision_path as dpath
# from conductor.solver.optimizer import best_first
# from conductor.solver.optimizer import greedy
from conductor.solver.optimizer import fit_first
from conductor.solver.optimizer import random_pick
from conductor.solver.request import demand
from conductor.solver.triage_tool.triage_data import TriageData
LOG = log.getLogger(__name__)
CONF = cfg.CONF
SOLVER_OPTS = [
]
CONF.register_opts(SOLVER_OPTS, group='solver')
class Optimizer(object):
# FIXME(gjung): _requests should be request (no underscore, one item)
def __init__(self, conf, _requests=None, _begin_time=None):
self.conf = conf
# start time of solving the plan
if _begin_time is not None:
self._begin_time = _begin_time
# self.search = greedy.Greedy(self.conf)
self.search = None
# self.search = best_first.BestFirst(self.conf)
if _requests is not None:
self.requests = _requests
        # Were the 'simulators' ever used? It doesn't look like it.
        # Since the solver/simulator code needs cleansing before being moved to ONAP,
        # I see no value in keeping this piece of code, which is not letting us do
        # that cleanup. Also, Shankar has confirmed the solver/simulators folder needs
        # to go away. Commenting out for now - maybe it should be removed permanently.
# Shankar (TODO).
# else:
# ''' for simulation '''
# req_sim = request_simulator.RequestSimulator(self.conf)
# req_sim.generate_requests()
# self.requests = req_sim.requests
def get_solution(self, num_solutions):
LOG.debug("search start for max {} solutions".format(num_solutions))
for rk in self.requests:
request = self.requests[rk]
LOG.debug("--- request = {}".format(rk))
decision_list = list()
LOG.debug("1. sort demands")
demand_list = self._sort_demands(request)
for d in demand_list:
LOG.debug(" demand = {}".format(d.name))
LOG.debug("2. search")
rand_counter = 10
while num_solutions == 'all' or num_solutions > 0:
LOG.debug("searching for the solution {}".format(len(decision_list) + 1))
st = time.time()
_copy_demand_list = copy.deepcopy(demand_list)
if not request.objective.goal:
LOG.debug("No objective function is provided. "
"Random pick algorithm is used")
self.search = random_pick.RandomPick(self.conf)
best_path = self.search.search(demand_list, request)
else:
LOG.debug("Fit first algorithm is used")
self.search = fit_first.FitFirst(self.conf)
best_path = self.search.search(demand_list,
request.objective, request)
LOG.debug("search delay = {} sec".format(time.time() - st))
demand_list = copy.deepcopy(_copy_demand_list)
if best_path is not None:
self.search.print_decisions(best_path)
rand_counter = 10
elif not request.objective.goal and rand_counter > 0 and self._has_candidates(request):
# RandomPick gave no candidates after applying constraints. If there are any candidates left
# lets' try again several times until some solution is found. When one of the demands is not unique
# it persists in the list all the time. In order to prevent infinite loop we need to have counter
rand_counter -= 1
LOG.debug("Incomplete random solution - repeat {}".format(rand_counter))
continue
else:
LOG.debug("no solution found")
break
# add the current solution to decision_list
decision_list.append(best_path.decisions)
#remove the candidate with "uniqueness = true"
self._remove_unique_candidate(request, best_path, demand_list)
if num_solutions != 'all':
num_solutions -= 1
self.search.triageSolver.getSolution(decision_list)
return decision_list
def _has_candidates(self, request):
for demand_name, demand in request.demands.items():
LOG.debug("Req Available resources: {} {}".format(demand_name, len(request.demands[demand_name].resources)))
if len(demand.resources) == 0:
LOG.debug("No more candidates for demand {}".format(demand_name))
return False
return True
def _remove_unique_candidate(self, _request, current_decision, demand_list):
        # This method removes a previously solved/used candidate from consideration
        # when Conductor needs to provide multiple solutions to the user/client
for demand_name, candidate_attr in current_decision.decisions.items():
candidate_uniqueness = candidate_attr.get('uniqueness')
if candidate_uniqueness and candidate_uniqueness == 'true':
                # if the candidate uniqueness is 'true', then remove
                # that solved candidate from the translated candidates list
_request.demands[demand_name].resources.pop(candidate_attr.get('candidate_id'))
# update the demand_list
for demand in demand_list:
if(getattr(demand, 'name') == demand_name):
demand.resources = _request.demands[demand_name].resources
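    # Note: _sort_demands below seeds the ordering with demands that are tied to concrete
    # locations (access_distance constraints, or latency_between/distance_between objective
    # terms with a Location operand); _get_depended_demands then expands demand-to-demand
    # dependencies (aic_distance constraints, latency/distance terms between demands)
    # breadth-first before any remaining unsorted demands are appended.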
def _sort_demands(self, _request):
LOG.debug(" _sort_demands")
demand_list = []
# first, find loc-demand dependencies
# using constraints and objective functions
open_demand_list = []
for key in _request.constraints:
c = _request.constraints[key]
if c.constraint_type == "access_distance":
for dk in c.demand_list:
if _request.demands[dk].sort_base != 1:
_request.demands[dk].sort_base = 1
open_demand_list.append(_request.demands[dk])
for op in _request.objective.operand_list:
if op.function.func_type == "latency_between": #TODO do i need to include the region_group here?
if isinstance(op.function.loc_a, demand.Location):
if _request.demands[op.function.loc_z.name].sort_base != 1:
_request.demands[op.function.loc_z.name].sort_base = 1
open_demand_list.append(op.function.loc_z)
elif isinstance(op.function.loc_z, demand.Location):
if _request.demands[op.function.loc_a.name].sort_base != 1:
_request.demands[op.function.loc_a.name].sort_base = 1
open_demand_list.append(op.function.loc_a)
elif op.function.func_type == "distance_between":
if isinstance(op.function.loc_a, demand.Location):
if _request.demands[op.function.loc_z.name].sort_base != 1:
_request.demands[op.function.loc_z.name].sort_base = 1
open_demand_list.append(op.function.loc_z)
elif isinstance(op.function.loc_z, demand.Location):
if _request.demands[op.function.loc_a.name].sort_base != 1:
_request.demands[op.function.loc_a.name].sort_base = 1
open_demand_list.append(op.function.loc_a)
if len(open_demand_list) == 0:
init_demand = self._exist_not_sorted_demand(_request.demands)
open_demand_list.append(init_demand)
# second, find demand-demand dependencies
while True:
d_list = self._get_depended_demands(open_demand_list, _request)
for d in d_list:
demand_list.append(d)
init_demand = self._exist_not_sorted_demand(_request.demands)
if init_demand is None:
break
open_demand_list.append(init_demand)
return demand_list
def _get_depended_demands(self, _open_demand_list, _request):
demand_list = []
while True:
if len(_open_demand_list) == 0:
break
d = _open_demand_list.pop(0)
if d.sort_base != 1:
d.sort_base = 1
demand_list.append(d)
for key in _request.constraints:
c = _request.constraints[key]
# FIXME(snarayanan): "aic" only to be known by conductor-data
if c.constraint_type == "aic_distance":
if d.name in c.demand_list:
for dk in c.demand_list:
if dk != d.name and \
_request.demands[dk].sort_base != 1:
_request.demands[dk].sort_base = 1
_open_demand_list.append(
_request.demands[dk])
for op in _request.objective.operand_list:
if op.function.func_type == "latency_between": #TODO
if op.function.loc_a.name == d.name:
if op.function.loc_z.name in \
_request.demands.keys():
if _request.demands[
op.function.loc_z.name].sort_base != 1:
_request.demands[
op.function.loc_z.name].sort_base = 1
_open_demand_list.append(op.function.loc_z)
elif op.function.loc_z.name == d.name:
if op.function.loc_a.name in \
_request.demands.keys():
if _request.demands[
op.function.loc_a.name].sort_base != 1:
_request.demands[
op.function.loc_a.name].sort_base = 1
_open_demand_list.append(op.function.loc_a)
elif op.function.func_type == "distance_between":
if op.function.loc_a.name == d.name:
if op.function.loc_z.name in \
_request.demands.keys():
if _request.demands[
op.function.loc_z.name].sort_base != 1:
_request.demands[
op.function.loc_z.name].sort_base = 1
_open_demand_list.append(op.function.loc_z)
elif op.function.loc_z.name == d.name:
if op.function.loc_a.name in \
_request.demands.keys():
if _request.demands[
op.function.loc_a.name].sort_base != 1:
_request.demands[
op.function.loc_a.name].sort_base = 1
_open_demand_list.append(op.function.loc_a)
return demand_list
def _exist_not_sorted_demand(self, _demands):
not_sorted_demand = None
for key in _demands:
demand = _demands[key]
if demand.sort_base != 1:
not_sorted_demand = demand
break
return not_sorted_demand
| 44.513986 | 120 | 0.562093 | [
"Apache-2.0"
] | aalsudais/optf-has | conductor/conductor/solver/optimizer/optimizer.py | 12,731 | Python |
#!/usr/bin/python3
import time
def count_5s(number):
counter = 0
while (number % 5 == 0):
counter += 1
number /= 5
return counter
def last_5_digits(number):
number = number % (10 ** 5)
return number
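# Note: main() below prints the last five digits of n! with its trailing zeros removed.
# factorial() cancels each factor of 5 found in i against a 2 previously set aside from an
# even i (every such 2x5 pair is a dropped trailing zero), keeps the running product
# truncated to five digits after each multiplication, and multiplies the leftover borrowed
# 2s back in at the end.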
def factorial(number):
borrowed_2s = 0
product = 1
for i in range(1, number+1):
if i % 2 == 0:
i = int(i/2)
borrowed_2s += 1
num_5s = count_5s(i)
if num_5s:
i = int(i/(5 ** num_5s))
borrowed_2s -= num_5s
product = last_5_digits(product * i)
product *= (2 ** borrowed_2s)
return product
def main(number):
return last_5_digits(
factorial(number)
)
if __name__ == '__main__':
n = 2560000
start_time = time.time()
result = main(n)
print(
"For {n}, took {time:.2f} seconds to find: {result}".format(
**{'n': n, 'time': time.time() - start_time, 'result': result})
)
| 19.36 | 79 | 0.528926 | [
"Unlicense"
] | COMU/kripton | factorial-trailing-digits/factorial_trailing_digits.py | 968 | Python |
#
# Copyright 2020 by 0x7c2, Simon Brecht.
# All rights reserved.
# This file is part of the Report/Analytic Tool - CPme,
# and is released under the "Apache License 2.0". Please see the LICENSE
# file that should have been included as part of this package.
#
from templates import check
import func
class check_performance_ispredundancy(check):
page = "Health.Firewall"
category = "Information"
title = "ISP Redundancy"
isFirewall = True
isManagement = False
minVersion = 8020
command = "cpstat fw | grep -A5 'ISP link table' | grep '|'"
isCommand = True
def run_check(self):
for line in self.commandOut:
fields = line.split('|')
ispname = fields[1]
ispstatus = fields[2]
isprole = fields[3]
if ispname != "Name":
				state = "WARN"
if ispstatus == "OK":
state = "PASS"
self.add_result(self.title + " (Name: " + ispname + ")", state, "Role: " + isprole)
else:
self.add_result(self.title, "PASS", "disabled")
class check_performance_securexl_sum(check):
page = "Health.SecureXL"
category = "Information"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stat | grep -v Template"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "FAIL"
data = line.strip('\n').split('|')
if len(data) < 4 or data[1].replace(" ","") == "" or data[1].replace(" ","") == "Id":
continue
id = data[1].replace(" ", "")
type = data[2].replace(" ", "")
status = data[3].replace(" ", "")
if status != "enabled":
state = "WARN"
else:
state = "PASS"
feature = True
self.add_result(self.title + " (Instance: " + id + ", Name: " + type + ", Status: " + status + ")", state, "")
class check_performance_securexl_templates(check):
page = "Health.SecureXL"
category = "Templates"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stat| grep Templates | sed s/\ \ */\/g| sed s/Templates//g"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "FAIL"
data = line.strip('\n').split(":")
if len(data) < 2:
continue
if "disabled" in data[1]:
state = "WARN"
if "enabled" in data[1]:
state = "PASS"
self.add_result(self.title + " (" + data[0] + " Templates)", state, data[1])
class check_performance_securexl_statistics(check):
page = "Health.SecureXL"
category = "Statistics"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stats -s | sed 's/ */ /g' | sed 's/\t/ /g'"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "PASS"
data = line.strip('\n').split(":")
if len(data) < 2:
continue
field = data[0].strip(' ')
valraw = data[1].strip(' ').split(" ")
valnum = valraw[0]
valper = int(str(valraw[1]).replace('(','').replace(')','').replace('%',''))
if "Accelerated conns" in field and valper < 30:
state = "WARN"
if "Accelerated pkts" in field and valper < 50:
state = "WARN"
if "F2Fed" in field and valper > 40:
state = "FAIL"
self.add_result(self.title + " (" + field + ")", state, valnum + "(" + str(valper) + "%)")
class check_performance_vpn_accel(check):
page = "Health.SecureXL"
category = "Information"
title = "SecureXL VPN Acceleration"
isFirewall = True
isManagement = False
minVersion = 8020
command = "vpn accel stat"
isCommand = True
def run_check(self):
found = False
for line in self.commandErr:
if "acceleration is enabled" in line:
self.add_result(self.title, 'PASS', line.strip())
found = True
if not found:
self.add_result(self.title, 'FAIL', str(self.commandOut) + str(self.commandErr))
| 28.386861 | 113 | 0.61404 | [
"Apache-2.0"
] | olejak/cpme2 | performance.py | 3,889 | Python |
# encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class BlendStats(SwigPyObject):
""" Contains the statistics for blend state binds in a frame. """
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
calls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many function calls were made."""
nulls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many objects were unbound."""
redundants = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many calls made no change due to the existing bind being identical."""
sets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many objects were bound."""
this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is ''
| 31.295775 | 100 | 0.626463 | [
"MIT"
] | Lex-DRL/renderdoc-py-stubs | _pycharm_skeletons/renderdoc/BlendStats.py | 2,222 | Python |
from __future__ import division
from __future__ import print_function
from models.pytorch.pna.layer import PNALayer
from multitask_benchmark.util.train import execute_train, build_arg_parser
# Training settings
parser = build_arg_parser()
parser.add_argument('--self_loop', action='store_true', default=False, help='Whether to add self loops in aggregators')
parser.add_argument('--towers', type=int, default=4, help='Number of towers in MPNN layers')
parser.add_argument('--aggregation', type=str, default='sum', help='Type of aggregation')
parser.add_argument('--pretrans_layers', type=int, default=1, help='Number of MLP layers before aggregation')
parser.add_argument('--posttrans_layers', type=int, default=1, help='Number of MLP layers after aggregation')
args = parser.parse_args()
# The MPNNs can be considered a particular case of PNA networks with a single aggregator and no scalers (identity)
execute_train(gnn_args=dict(nfeat=None,
nhid=args.hidden,
nodes_out=None,
graph_out=None,
dropout=args.dropout,
device=None,
first_conv_descr=dict(layer_type=PNALayer,
args=dict(
aggregators=[args.aggregation],
scalers=['identity'], avg_d=None,
towers=args.towers,
self_loop=args.self_loop,
divide_input=False,
pretrans_layers=args.pretrans_layers,
posttrans_layers=args.posttrans_layers
)),
middle_conv_descr=dict(layer_type=PNALayer,
args=dict(
aggregators=[args.aggregation],
scalers=['identity'],
avg_d=None, towers=args.towers,
self_loop=args.self_loop,
divide_input=True,
pretrans_layers=args.pretrans_layers,
posttrans_layers=args.posttrans_layers
)),
fc_layers=args.fc_layers,
conv_layers=args.conv_layers,
skip=args.skip,
gru=args.gru,
fixed=args.fixed,
variable=args.variable), args=args)
| 60.72 | 119 | 0.452899 | [
"MIT"
] | KonstantinKlepikov/pna | multitask_benchmark/train/mpnn.py | 3,036 | Python |
import pytest
from spacy import displacy
from spacy.displacy.render import DependencyRenderer, EntityRenderer
from spacy.lang.fa import Persian
from spacy.tokens import Span, Doc
def test_displacy_parse_ents(en_vocab):
"""Test that named entities on a Doc are converted into displaCy's format."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
ents = displacy.parse_ents(doc)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{"start": 4, "end": 10, "label": "ORG", "kb_id": "", "kb_url": "#"}
]
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
ents = displacy.parse_ents(doc)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{"start": 4, "end": 10, "label": "ORG", "kb_id": "Q95", "kb_url": "#"}
]
def test_displacy_parse_ents_with_kb_id_options(en_vocab):
"""Test that named entities with kb_id on a Doc are converted into displaCy's format."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
ents = displacy.parse_ents(
doc, {"kb_url_template": "https://www.wikidata.org/wiki/{}"}
)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{
"start": 4,
"end": 10,
"label": "ORG",
"kb_id": "Q95",
"kb_url": "https://www.wikidata.org/wiki/Q95",
}
]
def test_displacy_parse_deps(en_vocab):
"""Test that deps and tags on a Doc are converted into displaCy's format."""
words = ["This", "is", "a", "sentence"]
heads = [1, 1, 3, 1]
pos = ["DET", "VERB", "DET", "NOUN"]
tags = ["DT", "VBZ", "DT", "NN"]
deps = ["nsubj", "ROOT", "det", "attr"]
doc = Doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps)
deps = displacy.parse_deps(doc)
assert isinstance(deps, dict)
assert deps["words"] == [
{"lemma": None, "text": words[0], "tag": pos[0]},
{"lemma": None, "text": words[1], "tag": pos[1]},
{"lemma": None, "text": words[2], "tag": pos[2]},
{"lemma": None, "text": words[3], "tag": pos[3]},
]
assert deps["arcs"] == [
{"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
{"start": 2, "end": 3, "label": "det", "dir": "left"},
{"start": 1, "end": 3, "label": "attr", "dir": "right"},
]
def test_displacy_invalid_arcs():
renderer = DependencyRenderer()
words = [{"text": "This", "tag": "DET"}, {"text": "is", "tag": "VERB"}]
arcs = [
{"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
{"start": -1, "end": 2, "label": "det", "dir": "left"},
]
with pytest.raises(ValueError):
renderer.render([{"words": words, "arcs": arcs}])
def test_displacy_spans(en_vocab):
"""Test that displaCy can render Spans."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
html = displacy.render(doc[1:4], style="ent")
assert html.startswith("<div")
def test_displacy_raises_for_wrong_type(en_vocab):
with pytest.raises(ValueError):
displacy.render("hello world")
def test_displacy_rtl():
# Source: http://www.sobhe.ir/hazm/ – is this correct?
words = ["ما", "بسیار", "کتاب", "می\u200cخوانیم"]
# These are (likely) wrong, but it's just for testing
pos = ["PRO", "ADV", "N_PL", "V_SUB"] # needs to match lang.fa.tag_map
deps = ["foo", "bar", "foo", "baz"]
heads = [1, 0, 3, 1]
nlp = Persian()
doc = Doc(nlp.vocab, words=words, tags=pos, heads=heads, deps=deps)
doc.ents = [Span(doc, 1, 3, label="TEST")]
html = displacy.render(doc, page=True, style="dep")
assert "direction: rtl" in html
assert 'direction="rtl"' in html
assert f'lang="{nlp.lang}"' in html
html = displacy.render(doc, page=True, style="ent")
assert "direction: rtl" in html
assert f'lang="{nlp.lang}"' in html
def test_displacy_render_wrapper(en_vocab):
"""Test that displaCy accepts custom rendering wrapper."""
def wrapper(html):
return "TEST" + html + "TEST"
displacy.set_render_wrapper(wrapper)
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
html = displacy.render(doc, style="ent")
assert html.startswith("TEST<div")
assert html.endswith("/div>TEST")
# Restore
displacy.set_render_wrapper(lambda html: html)
def test_displacy_options_case():
ents = ["foo", "BAR"]
colors = {"FOO": "red", "bar": "green"}
renderer = EntityRenderer({"ents": ents, "colors": colors})
text = "abcd"
labels = ["foo", "bar", "FOO", "BAR"]
spans = [{"start": i, "end": i + 1, "label": labels[i]} for i in range(len(text))]
result = renderer.render_ents("abcde", spans, None).split("\n\n")
assert "red" in result[0] and "foo" in result[0]
assert "green" in result[1] and "bar" in result[1]
assert "red" in result[2] and "FOO" in result[2]
assert "green" in result[3] and "BAR" in result[3]
| 38.1875 | 92 | 0.590653 | [
"MIT"
] | xettrisomeman/spaCy | spacy/tests/test_displacy.py | 5,520 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
"""Network security rule.
:param id: Resource Id
:type id: str
:param description: Gets or sets a description for this rule. Restricted
to 140 chars.
:type description: str
:param protocol: Gets or sets Network protocol this rule applies to. Can
be Tcp, Udp or All(*). Possible values include: 'Tcp', 'Udp', '*'
:type protocol: str or :class:`SecurityRuleProtocol
<azure.mgmt.network.models.SecurityRuleProtocol>`
:param source_port_range: Gets or sets Source Port or Range. Integer or
     range between 0 and 65535. Asterisk '*' can also be used to match all
ports.
:type source_port_range: str
:param destination_port_range: Gets or sets Destination Port or Range.
     Integer or range between 0 and 65535. Asterisk '*' can also be used to
match all ports.
:type destination_port_range: str
:param source_address_prefix: Gets or sets source address prefix. CIDR or
     source IP range. Asterisk '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
'Internet' can also be used. If this is an ingress rule, specifies where
network traffic originates from.
:type source_address_prefix: str
:param destination_address_prefix: Gets or sets destination address
prefix. CIDR or source IP range. Asterix '*' can also be used to match
all source IPs. Default tags such as 'VirtualNetwork',
'AzureLoadBalancer' and 'Internet' can also be used.
:type destination_address_prefix: str
:param access: Gets or sets network traffic is allowed or denied.
Possible values are 'Allow' and 'Deny'. Possible values include:
'Allow', 'Deny'
:type access: str or :class:`SecurityRuleAccess
<azure.mgmt.network.models.SecurityRuleAccess>`
:param priority: Gets or sets the priority of the rule. The value can be
between 100 and 4096. The priority number must be unique for each rule
in the collection. The lower the priority number, the higher the
priority of the rule.
:type priority: int
    :param direction: Gets or sets the direction of the rule. Inbound or
     Outbound. The direction specifies whether the rule will be evaluated on
     incoming or outgoing traffic. Possible values include: 'Inbound', 'Outbound'
:type direction: str or :class:`SecurityRuleDirection
<azure.mgmt.network.models.SecurityRuleDirection>`
:param provisioning_state: Gets provisioning state of the PublicIP
resource Updating/Deleting/Failed
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'source_address_prefix': {'required': True},
'destination_address_prefix': {'required': True},
'access': {'required': True},
'direction': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
'access': {'key': 'properties.access', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'direction': {'key': 'properties.direction', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, protocol, source_address_prefix, destination_address_prefix, access, direction, id=None, description=None, source_port_range=None, destination_port_range=None, priority=None, provisioning_state=None, name=None, etag=None):
super(SecurityRule, self).__init__(id=id)
self.description = description
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_address_prefix = source_address_prefix
self.destination_address_prefix = destination_address_prefix
self.access = access
self.priority = priority
self.direction = direction
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| 49.263636 | 245 | 0.671342 | [
"MIT"
] | CharaD7/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/models/security_rule.py | 5,419 | Python |
# Copyright 2018, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements DPQuery interface for Gaussian average queries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from distutils.version import LooseVersion
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.dp_query import dp_query
from tensorflow_privacy.privacy.dp_query import normalized_query
class GaussianSumQuery(dp_query.SumAggregationDPQuery):
"""Implements DPQuery interface for Gaussian sum queries.
Accumulates clipped vectors, then adds Gaussian noise to the sum.
"""
# pylint: disable=invalid-name
_GlobalState = collections.namedtuple(
'_GlobalState', ['l2_norm_clip', 'stddev'])
def __init__(self, l2_norm_clip, stddev):
"""Initializes the GaussianSumQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
stddev: The stddev of the noise added to the sum.
"""
self._l2_norm_clip = l2_norm_clip
self._stddev = stddev
self._ledger = None
def set_ledger(self, ledger):
self._ledger = ledger
def make_global_state(self, l2_norm_clip, stddev):
"""Creates a global state from the given parameters."""
return self._GlobalState(tf.cast(l2_norm_clip, tf.float32),
tf.cast(stddev, tf.float32))
def initial_global_state(self):
return self.make_global_state(self._l2_norm_clip, self._stddev)
def derive_sample_params(self, global_state):
return global_state.l2_norm_clip
def initial_sample_state(self, template):
return tf.nest.map_structure(
dp_query.zeros_like, template)
def preprocess_record_impl(self, params, record):
"""Clips the l2 norm, returning the clipped record and the l2 norm.
Args:
params: The parameters for the sample.
record: The record to be processed.
Returns:
A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
the structure of preprocessed tensors, and l2_norm is the total l2 norm
before clipping.
"""
l2_norm_clip = params
record_as_list = tf.nest.flatten(record)
clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)
return tf.nest.pack_sequence_as(record, clipped_as_list), norm
def preprocess_record(self, params, record):
preprocessed_record, _ = self.preprocess_record_impl(params, record)
return preprocessed_record
def get_noised_result(self, sample_state, global_state):
"""See base class."""
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
def add_noise(v):
return v + tf.random.normal(
tf.shape(input=v), stddev=global_state.stddev)
else:
random_normal = tf.random_normal_initializer(
stddev=global_state.stddev)
def add_noise(v):
return v + random_normal(tf.shape(input=v))
if self._ledger:
dependencies = [
self._ledger.record_sum_query(
global_state.l2_norm_clip, global_state.stddev)
]
else:
dependencies = []
with tf.control_dependencies(dependencies):
return tf.nest.map_structure(add_noise, sample_state), global_state
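# Note on GaussianSumQuery above: one round runs as derive_sample_params (expose the
# clipping norm) -> preprocess_record (clip each record's global l2 norm to that bound)
# -> sum of the clipped records (SumAggregationDPQuery) -> get_noised_result (add
# N(0, stddev^2) noise to every tensor in the sum, recording the query in the privacy
# ledger when one is set).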
class GaussianAverageQuery(normalized_query.NormalizedQuery):
"""Implements DPQuery interface for Gaussian average queries.
Accumulates clipped vectors, adds Gaussian noise, and normalizes.
Note that we use "fixed-denominator" estimation: the denominator should be
specified as the expected number of records per sample. Accumulating the
  denominator separately would also be possible but would produce a higher
variance estimator.
"""
def __init__(self,
l2_norm_clip,
sum_stddev,
denominator):
"""Initializes the GaussianAverageQuery.
Args:
l2_norm_clip: The clipping norm to apply to the global norm of each
record.
sum_stddev: The stddev of the noise added to the sum (before
normalization).
denominator: The normalization constant (applied after noise is added to
the sum).
"""
super(GaussianAverageQuery, self).__init__(
numerator_query=GaussianSumQuery(l2_norm_clip, sum_stddev),
denominator=denominator)
| 33.715278 | 80 | 0.72379 | [
"Apache-2.0"
] | Juspem1980/privacy | tensorflow_privacy/privacy/dp_query/gaussian_query.py | 4,855 | Python |
from typing import List


class NumArray:
# O(n) time | O(n) space - where n is the length of the input list
def __init__(self, nums: List[int]):
self.nums = []
currentSum = 0
for num in nums:
currentSum += num
self.nums.append(currentSum)
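    # Illustrative behaviour (outside the LeetCode harness): NumArray([1, 2, 3, 4])
    # stores prefix sums [1, 3, 6, 10], so sumRange(1, 3) returns 10 - 1 = 9 (2 + 3 + 4).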
# O(1) time to look up the nums list
def sumRange(self, left: int, right: int) -> int:
if left > 0:
return self.nums[right] - self.nums[left - 1]
else:
return self.nums[right] | 35.214286 | 70 | 0.549696 | [
"MIT"
] | weilincheng/LeetCode-practice | dynamicProgramming/303_range_sum_query_immutable.py | 493 | Python |
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: otp.launcher.DownloadWatcher
from direct.task import Task
from otp.otpbase import OTPLocalizer
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
class DownloadWatcher(DirectObject):
__module__ = __name__
def __init__(self, phaseNames):
self.phaseNames = phaseNames
        self.text = DirectLabel(
            relief=None, guiId='DownloadWatcherText', pos=(-0.96, 0, -0.91),
            text=OTPLocalizer.DownloadWatcherInitializing, text_fg=(1, 1, 1, 1),
            text_scale=0.05, textMayChange=1, text_align=TextNode.ALeft,
            sortOrder=50)
        self.bar = DirectWaitBar(
            guiId='DownloadWatcherBar', pos=(-0.81, 0, -0.96), relief=DGG.SUNKEN,
            frameSize=(-0.6, 0.6, -0.1, 0.1), borderWidth=(0.02, 0.02), scale=0.25,
            range=100, sortOrder=50, frameColor=(0.5, 0.5, 0.5, 0.5),
            barColor=(0.2, 0.7, 0.2, 0.5), text='0%', text_scale=0.16,
            text_fg=(1, 1, 1, 1), text_align=TextNode.ACenter, text_pos=(0, -0.05))
self.accept('launcherPercentPhaseComplete', self.update)
return
def update(self, phase, percent, reqByteRate, actualByteRate):
phaseName = self.phaseNames[phase]
self.text['text'] = OTPLocalizer.DownloadWatcherUpdate % phaseName
self.bar['text'] = '%s %%' % percent
self.bar['value'] = percent
def cleanup(self):
self.text.destroy()
self.bar.destroy()
self.ignoreAll() | 99.604651 | 318 | 0.257063 | [
"BSD-3-Clause"
] | itsyaboyrocket/pirates | otp/launcher/DownloadWatcher.py | 4,283 | Python |
from ktrade.queue_messages.queue_message import QueueMessage
class BuyMessage(QueueMessage):
def __init__(self, ticker: str):
super().__init__(type='BUY')
self.ticker = ticker
| 26.714286 | 60 | 0.759358 | [
"MIT"
] | webclinic017/ktrade | ktrade/queue_messages/buy_message.py | 187 | Python |
from conans import ConanFile, CMake
class LibB(ConanFile):
name = "libB"
version = "0.0"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False]}
default_options = {"shared": False}
generators = "cmake"
scm = {"type": "git",
"url": "auto",
"revision": "auto"}
exports_sources = "LICENSE" # to avoid build info bug
def requirements(self):
self.requires("libA/[>=0.0]@demo/testing")
self.requires("libF/0.0@demo/testing")
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
cmake.install()
def package(self):
self.copy("LICENSE", dst="licenses")
def package_info(self):
self.cpp_info.libs = ["libB",]
| 23.176471 | 57 | 0.568528 | [
"MIT"
] | demo-ci-conan/libB | conanfile.py | 788 | Python |
from django.urls import path, include
from .views import *
from django.contrib import admin
app_name = 'Blog'
urlpatterns = [
path('', Blog_index.as_view(), name='blog_index'),
path('', include('Usermanagement.urls', namespace='Usermanagement'), name='usermanagement'),
path('create_article/', article_create, name='article_create'),
path('<str:column_name>/', Column_detail.as_view(), name='column_detail'),
path('<str:column_name>/<slug:article_name>/', Article_detail.as_view(), name='article_detail'),
path('<str:column_name>/<slug:article_name>/delete/', article_delete, name='article_delete'),
]
| 39.5625 | 100 | 0.71406 | [
"MIT"
] | Roy-Kid/my_blog | Blog/urls.py | 633 | Python |
import sys
sys.path.append('../')
from logparser import IPLoM, evaluator
import os
import pandas as pd
CT = [0.25, 0.3, 0.4, 0.4, 0.35, 0.58, 0.3, 0.3, 0.9, 0.78, 0.35, 0.3, 0.4]
lb = [0.3, 0.4, 0.01, 0.2, 0.25, 0.25, 0.3, 0.25, 0.25, 0.25, 0.3, 0.2, 0.7]
n_para = 13
benchmark_settings = {
'HDFS': {
'log_file': 'HDFS/HDFS_2k.log',
'log_format': '<Date> <Time> <Pid> <Level> <Component>: <Content>',
'regex': [r'blk_-?\d+', r'(\d+\.){3}\d+(:\d+)?'],
'st': 0.5,
'depth': 4
},
'Hadoop': {
'log_file': 'Hadoop/Hadoop_2k.log',
'log_format': '<Date> <Time> <Level> \[<Process>\] <Component>: <Content>',
'regex': [r'(\d+\.){3}\d+'],
'st': 0.5,
'depth': 4
},
'Spark': {
'log_file': 'Spark/Spark_2k.log',
'log_format': '<Date> <Time> <Level> <Component>: <Content>',
'regex': [r'(\d+\.){3}\d+', r'\b[KGTM]?B\b', r'([\w-]+\.){2,}[\w-]+'],
'st': 0.5,
'depth': 4
},
'Zookeeper': {
'log_file': 'Zookeeper/Zookeeper_2k.log',
'log_format': '<Date> <Time> - <Level> \[<Node>:<Component>@<Id>\] - <Content>',
'regex': [r'(/|)(\d+\.){3}\d+(:\d+)?'],
'st': 0.5,
'depth': 4
},
'BGL': {
'log_file': 'BGL/BGL_2k.log',
'log_format': '<Label> <Timestamp> <Date> <Node> <Time> <NodeRepeat> <Type> <Component> <Level> <Content>',
'regex': [r'core\.\d+'],
'st': 0.5,
'depth': 4
},
'HPC': {
'log_file': 'HPC/HPC_2k.log',
'log_format': '<LogId> <Node> <Component> <State> <Time> <Flag> <Content>',
'regex': [r'=\d+'],
'st': 0.5,
'depth': 4
},
'Thunderbird': {
'log_file': 'Thunderbird/Thunderbird_2k.log',
'log_format': '<Label> <Timestamp> <Date> <User> <Month> <Day> <Time> <Location> <Component>(\[<PID>\])?: <Content>',
'regex': [r'(\d+\.){3}\d+'],
'st': 0.5,
'depth': 4
},
'Windows': {
'log_file': 'Windows/Windows_2k.log',
'log_format': '<Date> <Time>, <Level> <Component> <Content>',
'regex': [r'0x.*?\s'],
'st': 0.7,
'depth': 5
},
'Linux': {
'log_file': 'Linux/Linux_2k.log',
'log_format': '<Month> <Date> <Time> <Level> <Component>(\[<PID>\])?: <Content>',
'regex': [r'(\d+\.){3}\d+', r'\d{2}:\d{2}:\d{2}'],
'st': 0.39,
'depth': 6
},
'Andriod': {
'log_file': 'Andriod/Andriod_2k.log',
'log_format': '<Date> <Time> <Pid> <Tid> <Level> <Component>: <Content>',
'regex': [r'(/[\w-]+)+', r'([\w-]+\.){2,}[\w-]+', r'\b(\-?\+?\d+)\b|\b0[Xx][a-fA-F\d]+\b|\b[a-fA-F\d]{4,}\b'],
'st': 0.2,
'depth': 6
},
'HealthApp': {
'log_file': 'HealthApp/HealthApp_2k.log',
'log_format': '<Time>\|<Component>\|<Pid>\|<Content>',
'regex': [],
'st': 0.2,
'depth': 4
},
'Apache': {
'log_file': 'Apache/Apache_2k.log',
'log_format': '\[<Time>\] \[<Level>\] <Content>',
'regex': [r'(\d+\.){3}\d+'],
'st': 0.5,
'depth': 4
},
'Proxifier': {
'log_file': 'Proxifier/Proxifier_2k.log',
'log_format': '\[<Time>\] <Program> - <Content>',
'regex': [r'<\d+\ssec', r'([\w-]+\.)+[\w-]+(:\d+)?', r'\d{2}:\d{2}(:\d{2})*', r'[KGTM]B'],
'st': 0.6,
'depth': 3
},
'OpenSSH': {
'log_file': 'OpenSSH/OpenSSH_2k.log',
'log_format': '<Date> <Day> <Time> <Component> sshd\[<Pid>\]: <Content>',
'regex': [r'(\d+\.){3}\d+', r'([\w-]+\.){2,}[\w-]+'],
'st': 0.6,
'depth': 5
},
'OpenStack': {
'log_file': 'OpenStack/OpenStack_2k.log',
'log_format': '<Logrecord> <Date> <Time> <Pid> <Level> <Component> \[<ADDR>\] <Content>',
'regex': [r'((\d+\.){3}\d+,?)+', r'/.+?\s', r'\d+'],
'st': 0.5,
'depth': 5
},
'Mac': {
'log_file': 'Mac/Mac_2k.log',
'log_format': '<Month> <Date> <Time> <User> <Component>\[<PID>\]( \(<Address>\))?: <Content>',
'regex': [r'([\w-]+\.){2,}[\w-]+'],
'st': 0.7,
'depth': 6
},
}
input_dir = '../../AgreementData/'
output_dir_1 = 'result/file1'
output_dir_2 = 'result/file2'
HDFS_dir = 'HDFS/'
Hadoop_dir = 'Hadoop/'
Spark_dir = 'Spark/'
Zookeeper_dir = 'Zookeeper/'
BGL_dir = 'BGL/'
HPC_dir = 'HPC/'
Thunderbird_dir = 'Thunderbird/'
Windows_dir = 'Windows/'
Linux_dir = 'Linux/'
Android_dir = 'Android/'
Apache_dir = 'Apache/'
OpenSSH_dir = 'OpenSSH/'
OpenStack_dir = 'OpenStack/'
Mac_dir = 'Mac/'
HealthApp_dir = 'HealthApp/'
Proxifier_dir = 'Proxifier/'
HDFS_file = 'HDFS.log'
Hadoop_file = 'Hadoop.log'
Spark_file = 'Spark.log'
Zookeeper_file = 'Zookeeper.log'
BGL_file = 'BGL.log'
HPC_file = 'HPC.log'
Thunderbird_file = 'Thunderbird.log'
Windows_file = 'Windows.log'
Linux_file = 'Linux.log'
Android_file = 'Android.log'
Apache_file = 'Apache.log'
OpenSSH_file = 'SSH.log'
OpenStack_file = 'OpenStack.log'
Mac_file = 'Mac.log'
HealthApp_file = 'HealthApp.log'
Proxifier_file = 'Proxifier.log'
Android_num = 10
Apache_num = 10
BGL_num = 10
Hadoop_num = 10
HDFS_num = 10
HealthApp_num = 10
HPC_num = 10
Linux_num = 3
Mac_num = 10
OpenSSH_num = 10
OpenStack_num = 1
Proxifier_num = 2
Spark_num = 10
Thunderbird_num = 10
Windows_num = 10
Zookeeper_num = 10
setting = benchmark_settings['BGL']
agreement_result = []
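# Note: for every pair of adjacent BGL partitions and each (CT, lowerBound) pair, both
# halves are parsed independently with IPLoM and evaluate_agreement compares the two
# structured outputs; the returned count is normalised by the 5000 lines per partition
# and stored in agreement_result together with the parameter string.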
for index in range(0,BGL_num,1):
logfile_1 = BGL_file + '.part' + str(index)
logfile_2 = BGL_file + '.part' + str(index+1)
indir = input_dir + BGL_dir
print(logfile_1)
print(logfile_2)
for para_index in range(0,n_para-1,1):
para_info = str(CT[para_index]) + ',' + str(lb[para_index])
print(para_info)
parser_1 = IPLoM.LogParser(log_format=setting['log_format'], indir=indir, outdir=output_dir_1,
CT=CT[para_index], lowerBound=lb[para_index], rex=setting['regex'])
parser_2 = IPLoM.LogParser(log_format=setting['log_format'], indir=indir, outdir=output_dir_2,
CT=CT[para_index], lowerBound=lb[para_index], rex=setting['regex'])
parser_1.parse(logfile_1)
parser_2.parse(logfile_2)
agreement = evaluator.evaluate_agreement(
os.path.join(output_dir_1, logfile_1 + '_structured.csv'),
os.path.join(output_dir_2, logfile_2 + '_structured.csv'))
ratio = float(float(agreement)/5000.0)
agreement_result.append([logfile_1,logfile_2,para_info,ratio])
df_result = pd.DataFrame(agreement_result, columns=['File1', 'File2', 'Para', 'Agreement'])
print(df_result)
df_result.to_csv('IPLoM_agreement_BGL.csv')
| 30.222222 | 125 | 0.524118 | [
"MIT"
] | dhetong/LogSampleTest | benchmark/IPLoM_agreement.py | 6,800 | Python |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suggestion registry classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import os
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_services
from core.domain import html_validation_service
from core.domain import question_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import suggestion_registry
from core.domain import suggestion_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class MockInvalidSuggestion(suggestion_registry.BaseSuggestion):
def __init__(self): # pylint: disable=super-init-not-called
pass
class BaseSuggestionUnitTests(test_utils.GenericTestBase):
"""Tests for the BaseSuggestion class."""
def setUp(self):
super(BaseSuggestionUnitTests, self).setUp()
self.base_suggestion = MockInvalidSuggestion()
def test_base_class_accept_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement accept.'):
self.base_suggestion.accept()
def test_base_class_get_change_list_for_accepting_suggestion_raises_error(
self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement '
'get_change_list_for_accepting_suggestion.'):
self.base_suggestion.get_change_list_for_accepting_suggestion()
def test_base_class_pre_accept_validate_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' pre_accept_validate.'):
self.base_suggestion.pre_accept_validate()
def test_base_class_populate_old_value_of_change_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' populate_old_value_of_change.'):
self.base_suggestion.populate_old_value_of_change()
def test_base_class_pre_update_validate_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' pre_update_validate.'):
self.base_suggestion.pre_update_validate({})
def test_base_class_get_all_html_content_strings(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' get_all_html_content_strings.'):
self.base_suggestion.get_all_html_content_strings()
def test_base_class_get_target_entity_html_strings(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' get_target_entity_html_strings.'):
self.base_suggestion.get_target_entity_html_strings()
def test_base_class_convert_html_in_suggestion_change(self):
def conversion_fn():
"""Temporary function."""
pass
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' convert_html_in_suggestion_change.'):
self.base_suggestion.convert_html_in_suggestion_change(
conversion_fn)
class SuggestionEditStateContentUnitTests(test_utils.GenericTestBase):
"""Tests for the SuggestionEditStateContent class."""
AUTHOR_EMAIL = '[email protected]'
REVIEWER_EMAIL = '[email protected]'
ASSIGNED_REVIEWER_EMAIL = '[email protected]'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionEditStateContentUnitTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
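        # Baseline payload for an edit-state-content suggestion; the tests below
        # build a suggestion from it and then perturb individual fields.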
self.suggestion_dict = {
'suggestion_id': 'exploration.exp1.thread1',
'suggestion_type': (
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': feconf.ENTITY_TYPE_EXPLORATION,
'target_id': 'exp1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': 'new suggestion content',
'old_value': None
},
'score_category': 'content.Algebra',
'language_code': None,
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
def test_create_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'content')
self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.suggestion_type = 'invalid_suggestion_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected suggestion_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_type = 'invalid_target_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
suggestion.validate()
def test_validate_target_version_at_submission(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_version_at_submission = 'invalid_version'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_version_at_submission to be an int'
):
suggestion.validate()
def test_validate_status(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be among allowed choices'
):
suggestion.validate()
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_score_category(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected score_category to be a string'
):
suggestion.validate()
def test_validate_score_category_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'score.score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
suggestion.score_category = 'invalid_score_category'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'invalid_score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be among allowed'
' choices'
):
suggestion.validate()
def test_validate_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to be an ExplorationChange'
):
suggestion.validate()
def test_validate_score_type_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'question.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be content'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected cmd to be edit_state_property'
):
suggestion.validate()
def test_validate_change_property_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.property_name = 'invalid_property'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected property_name to be content'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be None, received wrong_language_code'
):
suggestion.validate()
def test_pre_accept_validate_state_name(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.state_name = 'invalid_state_name'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected invalid_state_name to be a valid state name'
):
suggestion.pre_accept_validate()
def test_populate_old_value_of_change_with_invalid_state(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.change.state_name = 'invalid_state_name'
self.assertIsNone(suggestion.change.old_value)
suggestion.populate_old_value_of_change()
self.assertIsNone(suggestion.change.old_value)
def test_pre_update_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_STATE,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': suggestion.change.state_name,
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The following extra attributes are present: new_value, '
'old_value, property_name'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_property_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_PARAM_CHANGES,
'state_name': suggestion.change.state_name,
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change property_name must be equal to content'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_state_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'invalid_state',
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change state_name must be equal to state_1'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_new_value(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
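        # Give the stored suggestion the same html that the incoming update
        # proposes, so pre_update_validate should reject it as unchanged.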
new_content = state_domain.SubtitledHtml(
'content', '<p>new suggestion html</p>').to_dict()
suggestion.change.new_value = new_content
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': suggestion.change.state_name,
'new_value': new_content,
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError, 'The new html must not match the old html'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_non_equal_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to edit_state_property'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': 'Exploration 1 Albert title'
}))
def test_get_all_html_content_strings(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': None
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [u'new suggestion content']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
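        # The conversion callback should rewrite the legacy math RTE markup in
        # old_value into the newer math_content representation.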
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': '<p>suggestion</p>'
},
'old_value': {
'content_id': 'content',
'html': html_content
}
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
suggestion.convert_html_in_suggestion_change(
html_validation_service.
add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.old_value['html'], expected_html_content)
def test_get_target_entity_html_strings_returns_expected_strings(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': {
'content_id': 'content',
'html': 'Old content.'
}
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
expected_outcome_list = [u'Old content.']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_get_target_entity_html_with_none_old_value(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': None
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
self.assertEqual(actual_outcome_list, [])
class SuggestionTranslateContentUnitTests(test_utils.GenericTestBase):
"""Tests for the SuggestionEditStateContent class."""
AUTHOR_EMAIL = '[email protected]'
REVIEWER_EMAIL = '[email protected]'
ASSIGNED_REVIEWER_EMAIL = '[email protected]'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionTranslateContentUnitTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
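        # Baseline payload for a Hindi translation suggestion targeting the
        # 'Introduction' state; individual tests tweak fields on the built object.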
self.suggestion_dict = {
'suggestion_id': 'exploration.exp1.thread1',
'suggestion_type': (
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT),
'target_type': feconf.ENTITY_TYPE_EXPLORATION,
'target_id': 'exp1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is translated html.</p>',
'data_format': 'html'
},
'score_category': 'translation.Algebra',
'language_code': 'hi',
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
def test_pre_update_validate_fails_for_invalid_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'Introduction'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to %s' % (
exp_domain.CMD_ADD_WRITTEN_TRANSLATION)
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_state_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'State 1',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change state_name must be equal to Introduction'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_language_code(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'en',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The language code must be equal to hi'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_content_html(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'en',
'content_html': '<p>This is the changed content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change content_html must be equal to <p>This is a ' +
'content.</p>'
):
suggestion.pre_update_validate(
exp_domain.ExplorationChange(change))
def test_create_suggestion_add_translation(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_add_translation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'translation')
self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.suggestion_type = 'invalid_suggestion_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected suggestion_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_type = 'invalid_target_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
suggestion.validate()
def test_validate_target_version_at_submission(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_version_at_submission = 'invalid_version'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_version_at_submission to be an int'
):
suggestion.validate()
def test_validate_status(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be among allowed choices'
):
suggestion.validate()
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format.'
):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_score_category(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected score_category to be a string'
):
suggestion.validate()
def test_validate_score_category_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'score.score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
suggestion.score_category = 'invalid_score_category'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'invalid_score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be among allowed'
' choices'
):
suggestion.validate()
def test_validate_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to be an ExplorationChange'
):
suggestion.validate()
def test_validate_score_type_translation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'question.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be translation'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected cmd to be add_written_translation'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
expected_language_code = (
expected_suggestion_dict['change']['language_code']
)
suggestion.validate()
suggestion.language_code = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be %s, '
'received wrong_language_code' % expected_language_code
):
suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError, 'language_code cannot be None'
):
suggestion.validate()
def test_validate_change_with_invalid_language_code_fails_validation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.language_code = 'invalid_code'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid_code'
):
suggestion.validate()
def test_pre_accept_validate_state_name(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': '<p>This is a content.</p>'
},
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.state_name = 'invalid_state_name'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected invalid_state_name to be a valid state name'
):
suggestion.pre_accept_validate()
def test_pre_accept_validate_content_html(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': '<p>This is a content.</p>'
},
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.content_html = 'invalid content_html'
with self.assertRaisesRegexp(
utils.ValidationError,
'The Exploration content has changed since this translation '
'was submitted.'
):
suggestion.pre_accept_validate()
def test_accept_suggestion_adds_translation_in_exploration(self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
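        # Accepting the suggestion should register exactly one Hindi translation
        # on the exploration.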
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
    def test_accept_suggestion_with_pseudonymous_author_adds_translation(self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.PSEUDONYMOUS_ID,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
def test_get_all_html_content_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'<p>This is translated html.</p>', u'<p>This is a content.</p>']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_get_target_entity_html_strings_returns_expected_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
expected_outcome_list = [self.suggestion_dict['change']['content_html']]
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
change_dict = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': html_content,
'translation_html': '<p>This is translated html.</p>',
'data_format': 'html'
}
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
suggestion.convert_html_in_suggestion_change(
html_validation_service.add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.content_html, expected_html_content)
class SuggestionAddQuestionTest(test_utils.GenericTestBase):
"""Tests for the SuggestionAddQuestion class."""
AUTHOR_EMAIL = '[email protected]'
REVIEWER_EMAIL = '[email protected]'
ASSIGNED_REVIEWER_EMAIL = '[email protected]'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionAddQuestionTest, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
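        # Baseline payload for an add-question suggestion tied to skill_1; the
        # tests below mutate individual fields of the suggestion built from it.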
self.suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3,
},
'score_category': 'question.topic_1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
def test_create_suggestion_add_question(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'question')
self.assertEqual(suggestion.get_score_sub_type(), 'topic_1')
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'content.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be "question"'
):
suggestion.validate()
def test_validate_change_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = 'invalid_change'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change to be an instance of QuestionSuggestionChange'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain cmd'
):
suggestion.validate()
def test_validate_change_cmd_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected cmd to be create_new_fully_specified_question'
):
suggestion.validate()
def test_validate_change_question_dict(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.question_dict = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain question_dict'
):
suggestion.validate()
def test_validate_change_question_state_data_schema_version(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
# We are not setting value in suggestion.change.question_dict
# directly since pylint produces unsupported-assignment-operation
# error. The detailed analysis for the same can be checked
# in this issue: https://github.com/oppia/oppia/issues/7008.
question_dict = suggestion.change.question_dict
question_dict['question_state_data_schema_version'] = 0
suggestion.change.question_dict = question_dict
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question state schema version to be %s, '
'received 0' % feconf.CURRENT_STATE_SCHEMA_VERSION
):
suggestion.validate()
def test_validate_change_skill_difficulty_none(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_difficulty'
):
suggestion.validate()
def test_validate_change_skill_difficulty_invalid_value(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = 0.4
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change skill_difficulty to be one of '
):
suggestion.validate()
def test_pre_accept_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_id'
):
suggestion.pre_accept_validate()
def test_pre_accept_validate_change_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError, 'The skill with the given id doesn\'t exist.'
):
suggestion.pre_accept_validate()
def test_get_change_list_for_accepting_suggestion(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.get_change_list_for_accepting_suggestion())
def test_populate_old_value_of_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.populate_old_value_of_change())
def test_cannot_accept_suggestion_with_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError,
'The skill with the given id doesn\'t exist.'
):
suggestion.accept('commit message')
def test_pre_update_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
'property_name': question_domain.QUESTION_PROPERTY_LANGUAGE_CODE,
'new_value': 'bn',
'old_value': 'en'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to '
'create_new_fully_specified_question'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_2'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change skill_id must be equal to skill_1'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_complains_if_nothing_changed(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
with self.assertRaisesRegexp(
utils.ValidationError,
'At least one of the new skill_difficulty or question_dict '
'should be changed.'):
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change))
def test_pre_update_validate_accepts_a_change_in_skill_difficulty_only(
self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.6
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_pre_update_validate_accepts_a_change_in_state_data_only(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'hi',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format.'):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
expected_question_dict = (
expected_suggestion_dict['change']['question_dict']
)
suggestion.validate()
expected_question_dict['language_code'] = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question language_code.wrong_language_code. to be same '
'as suggestion language_code.en.'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be en, received None'):
suggestion.validate()
    def test_get_all_html_content_strings(self):
suggestion = suggestion_registry.SuggestionAddQuestion(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
            self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'', u'<p>This is a hint.</p>', u'<p>This is a solution.</p>', u'']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
answer_group = {
'outcome': {
'dest': None,
'feedback': {
'content_id': 'feedback_1',
'html': ''
},
'labelled_as_correct': True,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 0
},
'rule_type': 'Equals'
}],
'training_data': [],
'tagged_skill_misconception_id': None
}
question_state_dict = {
'content': {
'content_id': 'content_1',
'html': html_content
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content_1': {},
'feedback_1': {},
'feedback_2': {},
'hint_1': {},
'solution': {}
}
},
'written_translations': {
'translations_mapping': {
'content_1': {},
'feedback_1': {},
'feedback_2': {},
'hint_1': {},
'solution': {}
}
},
'interaction': {
'answer_groups': [answer_group],
'confirmed_unclassified_answers': [],
'customization_args': {
'choices': {
'value': [{
'html': 'option 1',
'content_id': 'ca_choices_0'
}]
},
'showChoicesInShuffledOrder': {
'value': True
}
},
'default_outcome': {
'dest': None,
'feedback': {
'content_id': 'feedback_2',
'html': 'Correct Answer'
},
'param_changes': [],
'refresher_exploration_id': None,
'labelled_as_correct': True,
'missing_prerequisite_skill_id': None
},
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': 'Hint 1'
}
}],
'solution': {
'answer_is_exclusive': False,
'correct_answer': 0,
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>'
}
},
'id': 'MultipleChoiceInput'
},
'param_changes': [],
'solicit_answer_details': False,
'classifier_model_id': None
}
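        # The question state built above still stores the legacy
        # raw_latex math component in its content html; the conversion
        # applied below should upgrade it to the math_content
        # representation.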
suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': question_state_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3,
},
'score_category': 'question.skill1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date)
}
suggestion = suggestion_registry.SuggestionAddQuestion(
suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
suggestion_dict['target_version_at_submission'],
suggestion_dict['status'], self.author_id, self.reviewer_id,
suggestion_dict['change'], suggestion_dict['score_category'],
suggestion_dict['language_code'], False, self.fake_date)
suggestion.convert_html_in_suggestion_change(
html_validation_service.add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.question_dict['question_state_data']['content'][
'html'], expected_html_content)
def test_accept_suggestion_with_images(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;img.svg&quot;}">'
'</oppia-noninteractive-math>')
question_state_dict = self._create_valid_question_data(
'default_state').to_dict()
question_state_dict['content']['html'] = html_content
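        # Upload the SVG referenced by the math component into the
        # question-suggestion image context so that the image exists
        # when the suggestion is accepted below.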
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
image_context = feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS
fs_services.save_original_and_compressed_versions_of_image(
'img.svg', image_context, 'skill1',
raw_image, 'image', False)
self.save_new_skill('skill1', self.author_id, description='description')
suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': question_state_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill1',
'skill_difficulty': 0.3,
},
'score_category': 'question.skill1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date)
}
suggestion = suggestion_registry.SuggestionAddQuestion(
suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
suggestion_dict['target_version_at_submission'],
suggestion_dict['status'], self.author_id, self.reviewer_id,
suggestion_dict['change'], suggestion_dict['score_category'],
suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept('commit_message')
    def test_constructor_updates_state_schema_in_change_cmd(self):
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': 'en',
'linked_skill_ids': ['skill_id'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill_id',
'skill_difficulty': 0.3
}
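        # The change payload above is still at schema version 27;
        # constructing the suggestion below should migrate it up to
        # feconf.CURRENT_STATE_SCHEMA_VERSION, which is what the final
        # assertion checks.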
self.assertEqual(
change['question_dict']['question_state_data_schema_version'], 27)
suggestion = suggestion_registry.SuggestionAddQuestion(
'suggestionId', 'target_id', 1, suggestion_models.STATUS_IN_REVIEW,
self.author_id, None, change, score_category, 'en', False,
self.fake_date)
self.assertEqual(
suggestion.change.question_dict[
'question_state_data_schema_version'],
feconf.CURRENT_STATE_SCHEMA_VERSION)
    def test_constructor_raise_exception_for_invalid_state_schema_version(self):
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': None,
'language_code': 'en',
'linked_skill_ids': ['skill_id'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill_id',
'skill_difficulty': 0.3
}
self.assertEqual(
change['question_dict']['question_state_data_schema_version'], None)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected state schema version to be in between 25'
):
suggestion_registry.SuggestionAddQuestion(
'suggestionId', 'target_id', 1,
suggestion_models.STATUS_IN_REVIEW, self.author_id, None,
change, score_category, 'en', False, self.fake_date)
class MockInvalidVoiceoverApplication(
suggestion_registry.BaseVoiceoverApplication):
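    """Mock that skips BaseVoiceoverApplication.__init__ on purpose so an
    instance can be created and the base class's NotImplementedError
    behaviour can be exercised in the tests below.
    """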
def __init__(self): # pylint: disable=super-init-not-called
pass
class BaseVoiceoverApplicationUnitTests(test_utils.GenericTestBase):
"""Tests for the BaseVoiceoverApplication class."""
def setUp(self):
super(BaseVoiceoverApplicationUnitTests, self).setUp()
self.base_voiceover_application = MockInvalidVoiceoverApplication()
def test_base_class_init_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement '
'__init__.'):
suggestion_registry.BaseVoiceoverApplication()
def test_base_class_accept_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement accept.'):
self.base_voiceover_application.accept()
def test_base_class_reject_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement reject.'):
self.base_voiceover_application.reject()
class ExplorationVoiceoverApplicationUnitTest(test_utils.GenericTestBase):
"""Tests for the ExplorationVoiceoverApplication class."""
def setUp(self):
super(ExplorationVoiceoverApplicationUnitTest, self).setUp()
self.signup('[email protected]', 'author')
self.author_id = self.get_user_id_from_email('[email protected]')
self.signup('[email protected]', 'reviewer')
self.reviewer_id = self.get_user_id_from_email('[email protected]')
self.voiceover_application = (
suggestion_registry.ExplorationVoiceoverApplication(
'application_id', 'exp_id', suggestion_models.STATUS_IN_REVIEW,
self.author_id, None, 'en', 'audio_file.mp3', '<p>Content</p>',
None))
def test_validation_with_invalid_target_type_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.target_type = 'invalid_target'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices, '
'received invalid_target'
):
self.voiceover_application.validate()
def test_validation_with_invalid_target_id_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.target_id = 123
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_status_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected status to be among allowed choices, '
'received invalid_status'
):
self.voiceover_application.validate()
def test_validation_with_invalid_author_id_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.author_id = 123
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_final_reviewer_id_raise_exception(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 123
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be None as the '
'voiceover application is not yet handled.'
):
self.voiceover_application.validate()
def test_validation_for_handled_application_with_invalid_final_review(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.voiceover_application.validate()
self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
self.voiceover_application.validate()
def test_validation_for_rejected_application_with_no_message(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.rejection_message, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 'reviewer_id'
self.voiceover_application.status = suggestion_models.STATUS_REJECTED
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected rejection_message to be a string for a '
'rejected application'
):
self.voiceover_application.validate()
def test_validation_for_accepted_application_with_message(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.rejection_message, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 'reviewer_id'
self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
self.voiceover_application.rejection_message = 'Invalid message'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected rejection_message to be None for the accepted '
'voiceover application, received Invalid message'
):
self.voiceover_application.validate()
def test_validation_with_invalid_language_code_type_raise_exception(self):
self.assertEqual(self.voiceover_application.language_code, 'en')
self.voiceover_application.validate()
self.voiceover_application.language_code = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected language_code to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_language_code_raise_exception(self):
self.assertEqual(self.voiceover_application.language_code, 'en')
self.voiceover_application.validate()
self.voiceover_application.language_code = 'invalid language'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid language'
):
self.voiceover_application.validate()
def test_validation_with_invalid_filename_type_raise_exception(self):
self.assertEqual(self.voiceover_application.filename, 'audio_file.mp3')
self.voiceover_application.validate()
self.voiceover_application.filename = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected filename to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_content_type_raise_exception(self):
self.assertEqual(self.voiceover_application.content, '<p>Content</p>')
self.voiceover_application.validate()
self.voiceover_application.content = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected content to be a string'
):
self.voiceover_application.validate()
def test_to_dict_returns_correct_dict(self):
self.voiceover_application.accept(self.reviewer_id)
expected_dict = {
'voiceover_application_id': 'application_id',
'target_type': 'exploration',
'target_id': 'exp_id',
'status': 'accepted',
'author_name': 'author',
'final_reviewer_name': 'reviewer',
'language_code': 'en',
'content': '<p>Content</p>',
'filename': 'audio_file.mp3',
'rejection_message': None
}
self.assertEqual(
self.voiceover_application.to_dict(), expected_dict)
def test_is_handled_property_returns_correct_value(self):
self.assertFalse(self.voiceover_application.is_handled)
self.voiceover_application.accept(self.reviewer_id)
self.assertTrue(self.voiceover_application.is_handled)
def test_accept_voiceover_application(self):
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.assertEqual(self.voiceover_application.status, 'review')
self.voiceover_application.accept(self.reviewer_id)
self.assertEqual(
self.voiceover_application.final_reviewer_id, self.reviewer_id)
self.assertEqual(self.voiceover_application.status, 'accepted')
def test_reject_voiceover_application(self):
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.assertEqual(self.voiceover_application.status, 'review')
self.voiceover_application.reject(self.reviewer_id, 'rejection message')
self.assertEqual(
self.voiceover_application.final_reviewer_id, self.reviewer_id)
self.assertEqual(self.voiceover_application.status, 'rejected')
self.assertEqual(
self.voiceover_application.rejection_message, 'rejection message')
class CommunityContributionStatsUnitTests(test_utils.GenericTestBase):
"""Tests for the CommunityContributionStats class."""
translation_reviewer_counts_by_lang_code = {
'hi': 0,
'en': 1
}
translation_suggestion_counts_by_lang_code = {
'fr': 6,
'en': 5
}
question_reviewer_count = 1
question_suggestion_count = 4
negative_count = -1
non_integer_count = 'non_integer_count'
sample_language_code = 'en'
invalid_language_code = 'invalid'
def _assert_community_contribution_stats_is_in_default_state(self):
"""Checks if the community contribution stats is in its default
state.
"""
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self.assertEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
), {})
self.assertEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
), {})
self.assertEqual(
community_contribution_stats.question_reviewer_count, 0)
self.assertEqual(
community_contribution_stats.question_suggestion_count, 0)
def test_initial_object_with_valid_arguments_has_correct_properties(self):
community_contribution_stats = (
suggestion_registry.CommunityContributionStats(
self.translation_reviewer_counts_by_lang_code,
self.translation_suggestion_counts_by_lang_code,
self.question_reviewer_count,
self.question_suggestion_count
)
)
community_contribution_stats.validate()
self.assertEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
self.translation_reviewer_counts_by_lang_code)
self.assertEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
self.translation_suggestion_counts_by_lang_code
)
self.assertEqual(
community_contribution_stats.question_reviewer_count,
self.question_reviewer_count
)
self.assertEqual(
community_contribution_stats.question_suggestion_count,
self.question_suggestion_count
)
def test_set_translation_reviewer_count_for_lang_code_updates_empty_dict(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_reviewer_count_for_lang_code_updates_count_value(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
) = {self.sample_language_code: 1}
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_reviewer_count_for_lang_code_adds_new_lang_key(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
) = {'en': 1}
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code('hi', 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{'en': 1, 'hi': 2}
)
def test_set_translation_suggestion_count_for_lang_code_updates_empty_dict(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
), {self.sample_language_code: 2}
)
def test_set_translation_suggestion_count_for_lang_code_updates_count_value(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
) = {self.sample_language_code: 1}
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_suggestion_count_for_lang_code_adds_new_lang_key(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
) = {'en': 1}
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code('hi', 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
{'en': 1, 'hi': 2}
)
def test_get_translation_language_codes_that_need_reviewers_for_one_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, {self.sample_language_code})
def test_get_translation_language_codes_that_need_reviewers_for_multi_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code('hi', 1)
stats.set_translation_suggestion_count_for_language_code('fr', 1)
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, {'hi', 'fr'})
def test_get_translation_language_codes_that_need_reviewers_for_no_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, set())
def test_translation_reviewers_are_needed_if_suggestions_but_no_reviewers(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
self.assertTrue(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
def test_translation_reviewers_are_needed_if_num_suggestions_past_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 1)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
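        # With at most one suggestion per reviewer, two suggestions and a
        # single reviewer means additional translation reviewers are needed.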
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertTrue(reviewers_are_needed)
def test_translation_reviewers_not_needed_if_num_suggestions_eqs_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertFalse(reviewers_are_needed)
def test_translation_reviewers_not_needed_if_num_suggestions_less_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertFalse(reviewers_are_needed)
    def test_translation_reviewers_not_needed_if_reviewers_and_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 1)
self.assertFalse(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
    def test_translation_reviewers_not_needed_if_no_reviewers_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
self._assert_community_contribution_stats_is_in_default_state()
self.assertFalse(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
def test_question_reviewers_are_needed_if_suggestions_zero_reviewers(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 1
self.assertTrue(stats.are_question_reviewers_needed())
def test_question_reviewers_are_needed_if_num_suggestions_past_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 2
stats.question_reviewer_count = 1
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertTrue(reviewers_are_needed)
def test_question_reviewers_not_needed_if_num_suggestions_eqs_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 2
stats.question_reviewer_count = 2
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertFalse(reviewers_are_needed)
def test_question_reviewers_not_needed_if_num_suggestions_less_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 1
stats.question_reviewer_count = 2
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertFalse(reviewers_are_needed)
    def test_question_reviewers_not_needed_if_no_reviewers_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
self._assert_community_contribution_stats_is_in_default_state()
self.assertFalse(stats.are_question_reviewers_needed())
def test_validate_translation_reviewer_counts_fails_for_negative_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, self.negative_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation reviewer count to be non-negative for '
'%s language code, received: %s.' % (
self.sample_language_code, self.negative_count)
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_negative_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, self.negative_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation suggestion count to be non-negative for '
'%s language code, received: %s.' % (
self.sample_language_code, self.negative_count)
):
community_contribution_stats.validate()
def test_validate_question_reviewer_count_fails_for_negative_count(self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_reviewer_count = (
self.negative_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question reviewer count to be non-negative, '
'received: %s.' % (
community_contribution_stats.question_reviewer_count)
):
community_contribution_stats.validate()
def test_validate_question_suggestion_count_fails_for_negative_count(self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_suggestion_count = (
self.negative_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question suggestion count to be non-negative, '
'received: %s.' % (
community_contribution_stats.question_suggestion_count)
):
community_contribution_stats.validate()
def test_validate_translation_reviewer_counts_fails_for_non_integer_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, self.non_integer_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation reviewer count to be an integer for '
'%s language code, received: %s.' % (
self.sample_language_code, self.non_integer_count)
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, self.non_integer_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation suggestion count to be an integer for '
'%s language code, received: %s.' % (
self.sample_language_code, self.non_integer_count)
):
community_contribution_stats.validate()
def test_validate_question_reviewer_count_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_reviewer_count = (
self.non_integer_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question reviewer count to be an integer, '
'received: %s.' % (
community_contribution_stats.question_reviewer_count)
):
community_contribution_stats.validate()
def test_validate_question_suggestion_count_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_suggestion_count = (
self.non_integer_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question suggestion count to be an integer, '
'received: %s.' % (
community_contribution_stats.question_suggestion_count)
):
community_contribution_stats.validate()
def test_validate_translation_reviewer_counts_fails_for_invalid_lang_code(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.invalid_language_code, 1)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Invalid language code for the translation reviewer counts: '
'%s.' % self.invalid_language_code
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_invalid_lang_code(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.invalid_language_code, 1)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Invalid language code for the translation suggestion counts: '
'%s.' % self.invalid_language_code
):
community_contribution_stats.validate()
class ReviewableSuggestionEmailInfoUnitTests(test_utils.GenericTestBase):
"""Tests for the ReviewableSuggestionEmailInfo class."""
suggestion_type = feconf.SUGGESTION_TYPE_ADD_QUESTION
language_code = 'en'
suggestion_content = 'sample question'
submission_datetime = datetime.datetime.utcnow()
def test_initial_object_with_valid_arguments_has_correct_properties(self):
reviewable_suggestion_email_info = (
suggestion_registry.ReviewableSuggestionEmailInfo(
self.suggestion_type, self.language_code,
self.suggestion_content, self.submission_datetime
)
)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_type,
self.suggestion_type)
self.assertEqual(
reviewable_suggestion_email_info.language_code,
self.language_code)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_content,
self.suggestion_content)
self.assertEqual(
reviewable_suggestion_email_info.submission_datetime,
self.submission_datetime)
| 42.685196 | 80 | 0.661365 | [ "Apache-2.0" ] | AdityaDubey0/oppia | core/domain/suggestion_registry_test.py | 141,288 | Python |
'''
Given a string, write a function that uses recursion to output a
list of all the possible permutations of that string.
For example, given s='abc' the function should return ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: If a character is repeated, treat each occurence as distinct,
for example an input of 'xxx' would return a list with 6 "versions" of 'xxx'
'''
from nose.tools import assert_equal
def permute(s):
out = []
# Base case
if (len(s) == 1):
out = [s]
else:
# For every letter in string
for i, let in enumerate(s):
# For every permutation
for perm in permute(s[:i] + s[i + 1:]):
# Add it to the output
out += [let + perm]
return out
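# Note: a string of length n has n! permutations, so the size of the
# returned list grows factorially with the input length.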
class TestPerm(object):
def test(self, solution):
assert_equal(sorted(solution('abc')), sorted(
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']))
assert_equal(sorted(solution('dog')), sorted(
['dog', 'dgo', 'odg', 'ogd', 'gdo', 'god']))
print('All test cases passed.')
# Run Tests
t = TestPerm()
t.test(permute)
| 23 | 96 | 0.573203 | [ "MIT" ] | washimimizuku/python-data-structures-and-algorithms | udemy-data-structures-and-algorithms/15-recursion/15.8_string_permutation.py | 1,127 | Python |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test generate RPC."""
from test_framework.test_framework import MAGATestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class RPCGenerateTest(MAGATestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
message = (
"generate\n"
"has been replaced by the -generate "
"cli option. Refer to -help for more information."
)
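        # The generate RPC itself was removed; only this hidden stub remains
        # to point users at the -generate CLI option.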
self.log.info("Test rpc generate raises with message to use cli option")
assert_raises_rpc_error(-32601, message, self.nodes[0].rpc.generate)
self.log.info("Test rpc generate help prints message to use cli option")
assert_equal(message, self.nodes[0].help("generate"))
self.log.info("Test rpc generate is a hidden command not discoverable in general help")
assert message not in self.nodes[0].help()
if __name__ == "__main__":
RPCGenerateTest().main()
| 31.945946 | 95 | 0.690355 | [ "MIT" ] | hhhogannwo/bitcoin | test/functional/rpc_generate.py | 1,182 | Python |
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
| 35.363014 | 81 | 0.627929 | [
"Apache-2.0"
] | actboy/espnet | espnet2/gan_tts/espnet_model.py | 5,163 | Python |
import time
import pytest
from celery.result import GroupResult
from celery.schedules import crontab
from kombu.exceptions import EncodeError
from director import build_celery_schedule
from director.exceptions import WorkflowSyntaxError
from director.models.tasks import Task
from director.models.workflows import Workflow
KEYS = ["id", "created", "updated", "task"]
def test_execute_one_task_success(app, create_builder):
workflow, builder = create_builder("example", "WORKFLOW", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_EXAMPLE"
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_EXAMPLE"
assert tasks[0].status.value == "pending"
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() is None
assert result.parent.get() == "task_example"
assert result.parent.state == "SUCCESS"
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "success"
assert task.status.value == "success"
def test_execute_one_task_error(app, create_builder):
workflow, builder = create_builder("example", "ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_ERROR"
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_ERROR"
assert tasks[0].status.value == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
def test_execute_chain_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_C"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.parent.parent.get() is None
assert result.parent.get() == "task_c"
assert result.parent.state == "SUCCESS"
assert result.parent.parent.get() == "task_b"
assert result.parent.parent.state == "SUCCESS"
assert result.parent.parent.parent.get() == "task_a"
assert result.parent.parent.parent.state == "SUCCESS"
# DB rows status updated
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_chain_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN_ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_ERROR"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_ERROR"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_b = Task.query.filter_by(key="TASK_B").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_b.status.value == "success"
assert task_error.status.value == "error"
assert workflow.status.value == "error"
def test_execute_group_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == [
"TASK_B",
"TASK_C",
]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() == "task_a"
assert isinstance(result.parent, GroupResult)
assert result.parent.get() == ["task_b", "task_c"]
# DB rows status updated
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_group_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP_ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == ["TASK_ERROR", "TASK_C"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_ERROR", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
task_c = Task.query.filter_by(key="TASK_C").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_error.status.value == "error"
assert task_c.status.value == "success"
assert workflow.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_one_task(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_ONE_TASK", {})
assert workflow["status"] == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.order_by(Task.created_at.asc()).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_multiple_tasks(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_MULTIPLE_TASKS", {})
assert workflow["status"] == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_celery_error = Task.query.filter_by(key="TASK_CELERY_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_celery_error.status.value == "error"
assert workflow.status.value == "error"
def test_return_values(app, create_builder):
workflow, builder = create_builder("example", "RETURN_VALUES", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert tasks["INT"] == 1234
assert tasks["LIST"] == ["jack", "sape", "guido"]
assert tasks["NONE"] is None
assert tasks["DICT"] == {"foo": "bar"}
assert tasks["NESTED"] == {
"jack": 4098,
"sape": 4139,
"guido": 4127,
"nested": {"foo": "bar"},
"none": None,
"list": ["jack", "sape", "guido"],
}
def test_return_exception(app, create_builder):
workflow, builder = create_builder("example", "RETURN_EXCEPTION", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert list(tasks["TASK_ERROR"].keys()) == ["exception", "traceback"]
assert tasks["TASK_ERROR"]["exception"] == "division by zero"
assert tasks["TASK_ERROR"]["traceback"].startswith(
"Traceback (most recent call last)"
)
assert "ZeroDivisionError: division by zero" in tasks["TASK_ERROR"]["traceback"]
def test_build_celery_schedule_float_with_payload():
float_schedule = {"payload": {}, "schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
def test_build_celery_schedule_float():
float_schedule = {"schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_week="*", day_of_month="*", month_of_year="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_week="*", day_of_month="*", month_of_year="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_week="1", day_of_month="*", month_of_year="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_week="*", day_of_month="1", month_of_year="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_week="*", day_of_month="*", month_of_year="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_week="*/12", day_of_month="*/13", month_of_year="*/14")
)
]
)
def test_build_celery_schedule_crontab(test_input, expected):
cron_schedule = {"schedule": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_interval():
float_schedule = {"interval": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_month="*", month_of_year="*", day_of_week="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_month="*", month_of_year="*", day_of_week="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_month="1", month_of_year="*", day_of_week="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_month="*", month_of_year="1", day_of_week="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_month="*", month_of_year="*", day_of_week="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_month="*/12", month_of_year="*/13", day_of_week="*/14")
)
]
)
def test_build_celery_crontab(test_input, expected):
cron_schedule = {"crontab": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_invalid_crontab():
# missing one element on the crontab syntax
periodic_conf = {"crontab": "* * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", periodic_conf)
def test_build_celery_invalid_schedule():
cron_schedule = {"crontab": "* * * * 12"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", cron_schedule)
def test_build_celery_invalid_periodic_key():
cron_schedule = {"non_valid_key": "* * * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_key", cron_schedule)
| 36.959079 | 110 | 0.652135 | [
"BSD-3-Clause"
] | PaulinCharliquart/celery-director | tests/test_workflows.py | 14,451 | Python |
import logging
import time
from abc import abstractmethod
from enum import Enum
from typing import Dict, Callable, Any, List
from schema import Schema
import sqlalchemy
from sqlalchemy.engine import ResultProxy
from sqlalchemy.orm import Query
from sqlalchemy.schema import Table
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.base import Connection
from contextlib import contextmanager
from flask_app.utilities.DataInterfaces import ConnectionOptions
logger = logging.getLogger(__name__)
class SqlDialect(Enum):
postgres = "postgres"
sqlite = "sqlite"
@classmethod
def has_value(cls, value) -> bool:
return any(value == item.value for item in cls)
# TODO: Connection Factory
class SqlConnectionOptions(ConnectionOptions):
@staticmethod
def factory(sql_connection_type: SqlDialect, **kwargs) -> 'SqlConnectionOptions':
"""
Function signatures for factory method
        Postgres: (dialect: SqlDialect, host: str, port: int, username: str, password: str,
database_name: str, timeout: int = None)
"""
return SqlConnectionFactories.get_factory(sql_connection_type)(**kwargs)
def __init__(self, dialect: SqlDialect, host: str, port: int, username: str, password: str, database_name: str
, timeout_s: int = None):
self.dialect: SqlDialect = dialect
self.host: str = host
self.port: int = port
self.username: str = username
self.password: str = password
self.database_name: str = database_name
self.timeout: int = timeout_s
self.connection_string: str = None
@classmethod
@abstractmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
pass
class PostgresConnectionOptions(SqlConnectionOptions):
_factory_schema: Schema = Schema(
{
'host': str,
'port': int,
'username': str,
'password': str,
'database_name': str
# 'timeout': int
},
ignore_extra_keys=True
)
def __init__(self,
dialect: SqlDialect,
host: str,
port: int,
username: str,
password: str,
database_name: str,
timeout_s: int = None) -> None:
super().__init__(dialect, host, port, username, password, database_name, timeout_s)
self.connection_string = \
f"postgresql://{self.username}:{self.password}@{self.host}:{self.port}/{self.database_name}"
@classmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
return schema.validate(parameters)
@classmethod
def factory(cls, **kwargs) -> 'PostgresConnectionOptions':
parameters: Dict = cls.schema_validate_arguments(cls._factory_schema, kwargs)
return cls(SqlDialect.postgres, parameters['host'], parameters['port']
, parameters['username'], parameters['password'], parameters['database_name']
, parameters.get('timeout'))
class SqlConnectionFactories:
_factories: Dict[SqlDialect, Callable] = {
SqlDialect.postgres: PostgresConnectionOptions.factory
# , SqlDialects.sqlite: SqliteConnectionOptions.factory
}
@classmethod
def get_factory(cls, factory_type: SqlDialect) -> Callable:
return cls._factories[factory_type]
class SqlInterface:
"""SQL methods to tack onto SQL based librarians"""
def __init__(self, connection_options: SqlConnectionOptions) -> None:
self.connection_options = connection_options
self.sql_engine: Engine = None
self.sql_metadata: sqlalchemy.MetaData = None
def update(self, schema: str, table: str, column: str, value: Any, sql_connection: Connection) -> None:
raise NotImplementedError
def select(self, schema: str, table: str, sql_connection: Connection) -> List[Dict[str, Any]]:
sql_table: Table = self._get_table_reflection(schema, table)
return self._execute_query(sql_connection, sql_table.select())
def insert(self, schema: str, table: str, values: List[Dict[str, Any]], sql_connection: Connection) -> None:
sql_table: Table = self._get_table_reflection(schema, table)
insert_query = sql_table.insert(values=values)
self._execute_query(sql_connection, insert_query)
def setup_pre_connection(self, connection_options) -> None:
self._build_engine(connection_options)
self._metadata_reflection(self.sql_engine)
def close_connection(self, sql_connection: Connection) -> None:
if sql_connection is not None:
sql_connection.close()
@contextmanager
def managed_connection(self, connection_options: SqlConnectionOptions = None) -> Connection:
if connection_options is None:
connection_options = self.connection_options
self.setup_pre_connection(connection_options)
connection: Connection = None
try:
connection = self.sql_engine.connect()
yield connection
finally:
self.close_connection(connection)
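    # Illustrative usage sketch (the connection values below are assumptions,
    # not part of this module): build options via the factory, then run
    # queries inside a managed connection so it is always closed.
    #   options = PostgresConnectionOptions.factory(
    #       host="localhost", port=5432, username="app",
    #       password="secret", database_name="appdb")
    #   interface = SqlInterface(options)
    #   with interface.managed_connection() as conn:
    #       rows = interface.select("public", "events", conn)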
# SQLAlchemy internal methods
def _build_engine(self, connection_options: SqlConnectionOptions) -> None:
self.sql_engine = sqlalchemy.create_engine(connection_options.connection_string)
def _metadata_reflection(self, sql_engine) -> None:
self.sql_metadata = sqlalchemy.MetaData(bind=sql_engine)
def _get_table_reflection(self, schema: str, table: str) -> Table:
return Table(table, self.sql_metadata, schema=schema, autoload=True)
def _validate_write_schema(self, table: Table, values: Dict[str, Any]) -> bool:
table_columns = list(dict(table.columns).keys())
return list(values.keys()) == table_columns
def _parse_result_proxy(self, result) -> List[Dict[str, Any]]:
return list(map(lambda x: dict(x), result))
def _execute_query(self, sql_connection: Connection, sql_query: Query) -> List[Dict[str, Any]]:
start_time: float = time.time()
return_result: List[Dict[str, Any]] = None
try:
result: ResultProxy = sql_connection.execute(sql_query)
if result.returns_rows:
return_result: List[Dict[str, Any]] = self._parse_result_proxy(result)
except Exception as e:
logger.info(f"SQL query failed: {e}")
logger.debug(f"SQL query {str(sql_query.compile())}, connection: {sql_connection.engine} failed with exception {e}")
raise e
finally:
end_time: float = time.time()
query_time: float = end_time - start_time
logger.info(f"SQL execute time: {query_time}")
logger.debug(
f"SQL execute time: {query_time}, query: {str(sql_query.compile())}, connection: {sql_connection.engine}"
)
return return_result
| 37.602151 | 128 | 0.666285 | [
"MIT"
] | cliftbar/flask_app_template | flask_app/utilities/DataInterfaces/SqlInterface.py | 6,994 | Python |
INPUTPATH = "input.txt"
#INPUTPATH = "input-test.txt"
with open(INPUTPATH) as ifile:
raw = ifile.read()
from typing import Tuple
def line_to_pos(line: str) -> Tuple[int, ...]:
filtered = "".join(c for c in line if c.isdigit() or c in {"-", ","})
return tuple(map(int, filtered.split(",")))
starts = tuple(zip(*map(line_to_pos, raw.strip().split("\n"))))
from itertools import combinations
from typing import List, Iterable
class Axis:
poss: List[int]
vels: List[int]
def __init__(self, start_poss: Iterable[int]) -> None:
self.poss = list(start_poss)
self.vels = [0] * len(self.poss)
def __eq__(self, other) -> bool:
return self.poss == other.poss and self.vels == other.vels
def step(self) -> None:
for i, j in combinations(range(len(self.poss)), 2):
a, b = self.poss[i], self.poss[j]
diff = 1 if a < b else -1 if a > b else 0
self.vels[i] += diff
self.vels[j] -= diff
for i, vel in enumerate(self.vels):
self.poss[i] += vel
system = tuple(map(Axis, starts))
for axis in system:
for _ in range(1000):
axis.step()
pos_by_moon = zip(*(axis.poss for axis in system))
vel_by_moon = zip(*(axis.vels for axis in system))
print(sum(
sum(map(abs, pos)) * sum(map(abs, vel))
for pos, vel in zip(pos_by_moon, vel_by_moon)
))
def cycle_period(start_poss: Iterable[int]) -> int:
tort = Axis(start_poss) # Get some rest, buddy. :3
hare = Axis(tort.poss) # Up for a run? >:3c
hare.step()
steps = 1
while hare != tort:
hare.step()
steps += 1
return steps
from math import lcm
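# Each axis evolves independently of the others, so the whole system repeats
# after the least common multiple of the per-axis cycle lengths.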
print(lcm(*map(cycle_period, starts)))
| 29.884615 | 70 | 0.664093 | [
"Unlicense"
] | Floozutter/aoc-2019-python | day12/main.py | 1,554 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cplotting as cplot
S={2+2j, 3+2j, 1.75+1j, 2+1j, 2.25+1j, 2.5+1j, 2.75+1j, 3+1j, 3.25+1j}
cplot.plot({1+2j+z for z in S},4)
cplot.show()
| 19.3 | 70 | 0.590674 | [
"BSD-3-Clause"
] | RyodoTanaka/Cording_Matrix | python/chap_1/1.4.3.py | 193 | Python |
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=100, metric='mAP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=1,
scale_aware_sigma=False,
)
# model settings
model = dict(
type='BottomUp',
pretrained='models/pytorch/imagenet/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='BottomUpSimpleHead',
in_channels=32,
num_joints=17,
num_deconv_layers=0,
tag_per_joint=True,
with_ae_loss=[True],
extra=dict(final_conv_kernel=1, )),
train_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
img_size=data_cfg['image_size']),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True],
with_ae=[True],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True),
loss_pose=dict(
type='MultiLossFactory',
num_joints=17,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0],
),
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=[
'img',
],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=24,
workers_per_gpu=1,
train=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 26.852792 | 76 | 0.561248 | [
"Apache-2.0"
] | RuisongZhou/mmpose | configs/bottom_up/hrnet/coco/hrnet_w32_coco_512x512.py | 5,290 | Python |
import streamlit as st
import warnings
try:
from streamlit_terran_timeline import terran_timeline, generate_timeline
except ImportError:
warnings.warn(
"Failed to load terran_timeline from streamlit_terran_timeline. "
"Please run 'pip install streamlit_terran_timeline' or "
"'pip install .' if working locally"
)
exit(1)
st.header("Face-recognition interactive-timeline generator")
st.write(
"In this demo we show you how easy it is to create an interactive"
"timeline chart of faces detected on videos. Thanksfully, there's an open "
"source project called Terran that makes all this process super super easy!"
)
st.write("More descriptions here")
st.subheader("Loading your video")
st.write(
"You can select videos from **multiple sources**: "
"YouTube and almost any video streaming platform, or any local file"
)
#
# Ask the user to input a video link or path and show the video below
#
video_path = st.text_input(
"Link or path to video", "https://www.youtube.com/watch?v=v2VgA_MCNDg"
)
#
# Show the actual faces timeline chart
#
st.subheader("Faces timeline chart")
st.write("")
@st.cache(persist=True, ttl=86_400, suppress_st_warning=True, show_spinner=False)
def _generate_timeline(video_path):
timeline = generate_timeline(
video_src=video_path,
appearence_threshold=5,
batch_size=32,
duration=None,
framerate=8,
output_directory="timelines",
ref_directory=None,
similarity_threshold=0.75,
start_time=0,
thumbnail_rate=1,
)
return timeline
with st.spinner("Generating timeline"):
timeline = _generate_timeline(video_path)
start_time = terran_timeline(timeline)
st.video(video_path, start_time=int(start_time))
| 26.014493 | 81 | 0.71532 | [
"MIT"
] | cenkbircanoglu/streamlit-terran-timeline | streamlit_terran_timeline/examples/youtube.py | 1,795 | Python |
# from blazingsql import BlazingContext
from Configuration import ExecutionMode
from Configuration import Settings as Settings
# from dask.distributed import Client
from DataBase import createSchema as createSchema
# from EndToEndTests import countDistincTest
from EndToEndTests import (
GroupByWitoutAggregations,
aggregationsWithoutGroupByTest,
bindableAliasTest,
booleanTest,
caseTest,
castTest,
)
from EndToEndTests import coalesceTest as coalesceTest
from EndToEndTests import columnBasisTest as columnBasisTest
from EndToEndTests import (
commonTableExpressionsTest,
concatTest,
countWithoutGroupByTest,
dateTest,
dirTest,
fileSystemGSTest,
fileSystemLocalTest,
fileSystemS3Test,
)
from EndToEndTests import fullOuterJoinsTest as fullOuterJoinsTest
from EndToEndTests import groupByTest as groupByTest
from EndToEndTests import innerJoinsTest as innerJoinsTest
from EndToEndTests import crossJoinsTest as crossJoinsTest
from EndToEndTests import leftOuterJoinsTest as leftOuterJoinsTest
from EndToEndTests import (
likeTest,
literalTest,
# loadDataTest,
nestedQueriesTest,
nonEquiJoinsTest,
)
from EndToEndTests import orderbyTest as orderbyTest
from EndToEndTests import (
predicatesWithNulls,
roundTest,
simpleDistributionTest,
stringTests,
substringTest,
tablesFromPandasTest,
# timestampdiffTest,
timestampTest,
tpchQueriesTest,
)
from EndToEndTests import unaryOpsTest as unaryOpsTest
from EndToEndTests import unifyTablesTest
from EndToEndTests import unionTest as unionTest
from EndToEndTests import useLimitTest
from EndToEndTests import whereClauseTest as whereClauseTest
from EndToEndTests import wildCardTest
from pynvml import nvmlInit
from pyspark.sql import SparkSession
from Runner import runTest
from Utils import Execution, init_context
def main():
print("**init end2end**")
Execution.getArgs()
nvmlInit()
dir_data_file = Settings.data["TestSettings"]["dataDirectory"]
nRals = Settings.data["RunSettings"]["nRals"]
drill = "drill"
spark = "spark"
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if (
Settings.execution_mode == ExecutionMode.FULL and compareResults == "true"
) or Settings.execution_mode == ExecutionMode.GENERATOR:
# Create Table Drill -----------------------------------------
from pydrill.client import PyDrill
drill = PyDrill(host="localhost", port=8047)
createSchema.init_drill_schema(
drill, Settings.data["TestSettings"]["dataDirectory"], bool_test=True
)
# Create Table Spark -------------------------------------------------
spark = SparkSession.builder.appName("allE2ETest").getOrCreate()
createSchema.init_spark_schema(
spark, Settings.data["TestSettings"]["dataDirectory"]
)
# Create Context For BlazingSQL
bc, dask_client = init_context()
targetTestGroups = Settings.data["RunSettings"]["targetTestGroups"]
runAllTests = (
len(targetTestGroups) == 0
) # if targetTestGroups was empty the user wants to run all the tests
if runAllTests or ("aggregationsWithoutGroupByTest" in targetTestGroups):
aggregationsWithoutGroupByTest.main(
dask_client, drill, dir_data_file, bc, nRals
)
if runAllTests or ("coalesceTest" in targetTestGroups):
coalesceTest.main(
dask_client, drill, dir_data_file, bc, nRals
) # we are not supporting coalesce yet
if runAllTests or ("columnBasisTest" in targetTestGroups):
columnBasisTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("commonTableExpressionsTest" in targetTestGroups):
commonTableExpressionsTest.main(dask_client, drill, dir_data_file, bc, nRals)
# we are not supporting count distinct yet
# countDistincTest.main(dask_client, drill, dir_data_file, bc)
if runAllTests or ("countWithoutGroupByTest" in targetTestGroups):
countWithoutGroupByTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("dateTest" in targetTestGroups):
dateTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("timestampTest" in targetTestGroups):
timestampTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("fullOuterJoinsTest" in targetTestGroups):
fullOuterJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("groupByTest" in targetTestGroups):
groupByTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("GroupByWitoutAggregations" in targetTestGroups):
GroupByWitoutAggregations.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("innerJoinsTest" in targetTestGroups):
innerJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("crossJoinsTest" in targetTestGroups):
crossJoinsTest.main(dask_client, spark, dir_data_file, bc, nRals)
if runAllTests or ("" in targetTestGroups):
leftOuterJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("nonEquiJoinsTest" in targetTestGroups):
nonEquiJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
# loadDataTest.main(dask_client, bc) #check this
if runAllTests or ("nestedQueriesTest" in targetTestGroups):
nestedQueriesTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("orderbyTest" in targetTestGroups):
orderbyTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("predicatesWithNulls" in targetTestGroups):
predicatesWithNulls.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("stringTests" in targetTestGroups):
stringTests.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("tablesFromPandasTest" in targetTestGroups):
tablesFromPandasTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("unaryOpsTest" in targetTestGroups):
unaryOpsTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("unifyTablesTest" in targetTestGroups):
unifyTablesTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("unionTest" in targetTestGroups):
unionTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("useLimitTest" in targetTestGroups):
useLimitTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("whereClauseTest" in targetTestGroups):
whereClauseTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("bindableAliasTest" in targetTestGroups):
bindableAliasTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("booleanTest" in targetTestGroups):
booleanTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("caseTest" in targetTestGroups):
caseTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("castTest" in targetTestGroups):
castTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("concatTest" in targetTestGroups):
concatTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("literalTest" in targetTestGroups):
literalTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("dirTest" in targetTestGroups):
dirTest.main(dask_client, drill, dir_data_file, bc, nRals)
# HDFS is not working yet
# fileSystemHdfsTest.main(dask_client, drill, dir_data_file, bc)
# HDFS is not working yet
# mixedFileSystemTest.main(dask_client, drill, dir_data_file, bc)
if runAllTests or ("likeTest" in targetTestGroups):
likeTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("simpleDistributionTest" in targetTestGroups):
simpleDistributionTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("substringTest" in targetTestGroups):
substringTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("wildCardTest" in targetTestGroups):
wildCardTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("tpchQueriesTest" in targetTestGroups):
tpchQueriesTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("roundTest" in targetTestGroups):
roundTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("fileSystemLocalTest" in targetTestGroups):
fileSystemLocalTest.main(dask_client, drill, dir_data_file, bc, nRals)
if Settings.execution_mode != ExecutionMode.GPUCI:
if runAllTests or ("fileSystemS3Test" in targetTestGroups):
fileSystemS3Test.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("fileSystemGSTest" in targetTestGroups):
fileSystemGSTest.main(dask_client, drill, dir_data_file, bc, nRals)
# timestampdiffTest.main(dask_client, spark, dir_data_file, bc, nRals)
if Settings.execution_mode != ExecutionMode.GENERATOR:
result, error_msgs = runTest.save_log(
Settings.execution_mode == ExecutionMode.GPUCI
)
max = 0
for i in range(0, len(Settings.memory_list)):
if (Settings.memory_list[i].delta) > max:
max = Settings.memory_list[i].delta
print("MAX DELTA: " + str(max))
print(
"""***********************************************************
********************"""
)
for i in range(0, len(Settings.memory_list)):
print(
Settings.memory_list[i].name
+ ":"
+ " Start Mem: "
+ str(Settings.memory_list[i].start_mem)
+ " End Mem: "
+ str(Settings.memory_list[i].end_mem)
+ " Diff: "
+ str(Settings.memory_list[i].delta)
)
return result, error_msgs
return True, []
if __name__ == "__main__":
import time
start = time.time() # in seconds
result, error_msgs = main()
if Settings.execution_mode != ExecutionMode.GENERATOR:
        # NOTE kahro william percy mario : here we tell gpuci there was
# an error comparing with historic results
# TODO william kharoly felipe we should try to enable and
# use this function in the future
result = True
if result is False:
for error_msg in error_msgs:
print(error_msg)
# import sys
end = time.time() # in seconds
elapsed = end - start # in seconds
time_delta_desc = (
str(elapsed / 60)
+ " minutes and "
+ str(int(elapsed) % 60)
+ " seconds"
)
print(
"==>> E2E FAILED against previous run, total time was: "
+ time_delta_desc
)
# TODO percy kharo willian: uncomment this line
# when gpuci has all the env vars set
# return error exit status to the command prompt (shell)
# sys.exit(1)
| 37.28115 | 88 | 0.68592 | [
"Apache-2.0"
] | BroadBridgeNetworks/blazingsql | tests/BlazingSQLTest/EndToEndTests/allE2ETest.py | 11,669 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: geometry.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import boxgeom_pb2 as boxgeom__pb2
import cylindergeom_pb2 as cylindergeom__pb2
import spheregeom_pb2 as spheregeom__pb2
import planegeom_pb2 as planegeom__pb2
import imagegeom_pb2 as imagegeom__pb2
import heightmapgeom_pb2 as heightmapgeom__pb2
import meshgeom_pb2 as meshgeom__pb2
import vector3d_pb2 as vector3d__pb2
import polylinegeom_pb2 as polylinegeom__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='geometry.proto',
package='gazebo.msgs',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0egeometry.proto\x12\x0bgazebo.msgs\x1a\rboxgeom.proto\x1a\x12\x63ylindergeom.proto\x1a\x10spheregeom.proto\x1a\x0fplanegeom.proto\x1a\x0fimagegeom.proto\x1a\x13heightmapgeom.proto\x1a\x0emeshgeom.proto\x1a\x0evector3d.proto\x1a\x12polylinegeom.proto\"\xb5\x04\n\x08Geometry\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.gazebo.msgs.Geometry.Type\x12!\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x14.gazebo.msgs.BoxGeom\x12+\n\x08\x63ylinder\x18\x03 \x01(\x0b\x32\x19.gazebo.msgs.CylinderGeom\x12%\n\x05plane\x18\x04 \x01(\x0b\x32\x16.gazebo.msgs.PlaneGeom\x12\'\n\x06sphere\x18\x05 \x01(\x0b\x32\x17.gazebo.msgs.SphereGeom\x12%\n\x05image\x18\x06 \x01(\x0b\x32\x16.gazebo.msgs.ImageGeom\x12-\n\theightmap\x18\x07 \x01(\x0b\x32\x1a.gazebo.msgs.HeightmapGeom\x12#\n\x04mesh\x18\x08 \x01(\x0b\x32\x15.gazebo.msgs.MeshGeom\x12%\n\x06points\x18\t \x03(\x0b\x32\x15.gazebo.msgs.Vector3d\x12\'\n\x08polyline\x18\n \x03(\x0b\x32\x15.gazebo.msgs.Polyline\"\x93\x01\n\x04Type\x12\x07\n\x03\x42OX\x10\x01\x12\x0c\n\x08\x43YLINDER\x10\x02\x12\n\n\x06SPHERE\x10\x03\x12\t\n\x05PLANE\x10\x04\x12\t\n\x05IMAGE\x10\x05\x12\r\n\tHEIGHTMAP\x10\x06\x12\x08\n\x04MESH\x10\x07\x12\x10\n\x0cTRIANGLE_FAN\x10\x08\x12\x0e\n\nLINE_STRIP\x10\t\x12\x0c\n\x08POLYLINE\x10\n\x12\t\n\x05\x45MPTY\x10\x0b'
,
dependencies=[boxgeom__pb2.DESCRIPTOR,cylindergeom__pb2.DESCRIPTOR,spheregeom__pb2.DESCRIPTOR,planegeom__pb2.DESCRIPTOR,imagegeom__pb2.DESCRIPTOR,heightmapgeom__pb2.DESCRIPTOR,meshgeom__pb2.DESCRIPTOR,vector3d__pb2.DESCRIPTOR,polylinegeom__pb2.DESCRIPTOR,])
_GEOMETRY_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='gazebo.msgs.Geometry.Type',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='BOX', index=0, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CYLINDER', index=1, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SPHERE', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PLANE', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IMAGE', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='HEIGHTMAP', index=5, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESH', index=6, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TRIANGLE_FAN', index=7, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LINE_STRIP', index=8, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='POLYLINE', index=9, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EMPTY', index=10, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=610,
serialized_end=757,
)
_sym_db.RegisterEnumDescriptor(_GEOMETRY_TYPE)
_GEOMETRY = _descriptor.Descriptor(
name='Geometry',
full_name='gazebo.msgs.Geometry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='gazebo.msgs.Geometry.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='box', full_name='gazebo.msgs.Geometry.box', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cylinder', full_name='gazebo.msgs.Geometry.cylinder', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plane', full_name='gazebo.msgs.Geometry.plane', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sphere', full_name='gazebo.msgs.Geometry.sphere', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='image', full_name='gazebo.msgs.Geometry.image', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='heightmap', full_name='gazebo.msgs.Geometry.heightmap', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mesh', full_name='gazebo.msgs.Geometry.mesh', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='points', full_name='gazebo.msgs.Geometry.points', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='polyline', full_name='gazebo.msgs.Geometry.polyline', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_GEOMETRY_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=192,
serialized_end=757,
)
_GEOMETRY.fields_by_name['type'].enum_type = _GEOMETRY_TYPE
_GEOMETRY.fields_by_name['box'].message_type = boxgeom__pb2._BOXGEOM
_GEOMETRY.fields_by_name['cylinder'].message_type = cylindergeom__pb2._CYLINDERGEOM
_GEOMETRY.fields_by_name['plane'].message_type = planegeom__pb2._PLANEGEOM
_GEOMETRY.fields_by_name['sphere'].message_type = spheregeom__pb2._SPHEREGEOM
_GEOMETRY.fields_by_name['image'].message_type = imagegeom__pb2._IMAGEGEOM
_GEOMETRY.fields_by_name['heightmap'].message_type = heightmapgeom__pb2._HEIGHTMAPGEOM
_GEOMETRY.fields_by_name['mesh'].message_type = meshgeom__pb2._MESHGEOM
_GEOMETRY.fields_by_name['points'].message_type = vector3d__pb2._VECTOR3D
_GEOMETRY.fields_by_name['polyline'].message_type = polylinegeom__pb2._POLYLINE
_GEOMETRY_TYPE.containing_type = _GEOMETRY
DESCRIPTOR.message_types_by_name['Geometry'] = _GEOMETRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Geometry = _reflection.GeneratedProtocolMessageType('Geometry', (_message.Message,), {
'DESCRIPTOR' : _GEOMETRY,
'__module__' : 'geometry_pb2'
# @@protoc_insertion_point(class_scope:gazebo.msgs.Geometry)
})
_sym_db.RegisterMessage(Geometry)
# @@protoc_insertion_point(module_scope)
| 46.721239 | 1,282 | 0.762572 | [
"Apache-2.0"
] | CryptoCopter/pygazebo | pygazebo/msg/geometry_pb2.py | 10,559 | Python |
from aws_cdk import (
core,
aws_iam as iam,
aws_kinesis as kinesis,
aws_kinesisfirehose as kinesisfirehose
)
class Lab07Stack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# The code that defines your stack goes here
role01 = iam.CfnRole(self,id="firehose01_role",assume_role_policy_document= {
"Statement": [{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}],
"Version": "2012-10-17"
},managed_policy_arns=[
"arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole"
])
policy01=iam.CfnPolicy(self,id="firehose01_policy",policy_name="firehose01_policy",policy_document={
'Version': "2012-10-17",
'Statement': [
{
"Action": [
's3:AbortMultipartUpload',
's3:GetBucketLocation',
's3:GetObject',
's3:ListBucket',
's3:ListBucketMultipartUploads',
's3:PutObject'
],
"Resource": ['*'],
"Effect": "Allow"
}
]
},roles=[role01.ref])
delivery_stream = kinesisfirehose.CfnDeliveryStream(self, id = "firehose01",
delivery_stream_name = "firehose01",
extended_s3_destination_configuration = {
                # S3 bucket information
'bucketArn': 'arn:aws:s3:::fluent-bit-s3',
                # Compression settings; old approach: gzip, new approach TBD
'compressionFormat': 'GZIP',
                # Format conversion: whether to convert to orc/parquet; default: none
'DataFormatConversionConfiguration':"Disabled",
                # Encryption: default none
'EncryptionConfiguration':"NoEncryption",
                # Error output prefix
'bufferingHints': {
'intervalInSeconds': 600,
'sizeInMBs': 128
},
'ProcessingConfiguration': {
"Enabled": True,
"Processor": {
"Type": "Lambda",
"Parameters": [
{
"ParameterName": "BufferIntervalInSeconds",
"ParameterValue": "60"
},
{
"ParameterName": "BufferSizeInMBs",
"ParameterValue": "3"
},
{
"ParameterName": "LambdaArn",
"ParameterValue": "arn:aws:lambda:ap-southeast-1:596030579944:function:firehose-test"
}
]
}
},
'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role',
'S3BackupConfiguration': {
"BucketARN": 'arn:aws:s3:::fluent-bit-s3',
'bufferingHints': {
'intervalInSeconds': 600,
'sizeInMBs': 128
},
'compressionFormat': 'GZIP',
'EncryptionConfiguration':"NoEncryption",
'Prefix': "/backup",
'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role'
}
},
)
| 49.82243 | 149 | 0.298068 | [
"Apache-2.0"
] | stevensu1977/aws-cdk-handson | Lab07/lab07/lab07_stack.py | 5,425 | Python |
"""Main app/routing file for TwitOff"""
from os import getenv
from flask import Flask, render_template, request
from twitoff.twitter import add_or_update_user
from twitoff.models import DB, User, MIGRATE
from twitoff.predict import predict_user
def create_app():
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
DB.init_app(app)
MIGRATE.init_app(app, DB)
# TODO - make rest of application
@app.route('/')
def root():
# SQL equivalent = "SELECT * FROM user;"
return render_template('base.html', title="Home", users=User.query.all())
@app.route("/compare", methods=["POST"])
def compare():
user0, user1 = sorted(
[request.values["user1"], request.values["user2"]])
        # conditional that prevents same-user comparison
if user0 == user1:
message = "Cannot compare users to themselves!"
else:
hypo_tweet_text = request.values["tweet_text"]
            # prediction returns zero or one depending upon the user
prediction = predict_user(user0, user1, hypo_tweet_text)
message = "'{}' is more likely to be said by {} than {}".format(
hypo_tweet_text, user1 if prediction else user0,
user0 if prediction else user1
)
# returns rendered template with dynamic message
return render_template('prediction.html', title="Prediction:", message=message)
@app.route("/user", methods=["POST"])
@app.route("/user/<name>", methods=["GET"])
def user(name=None, message=""):
name = name or request.values["user_name"]
try:
if request.method == "POST":
add_or_update_user(name)
message = "User {} sucessfully added!".format(name)
tweets = User.query.filter(User.name == name).one().tweets
except Exception as e:
message = "Error handling {}: {}".format(name, e)
tweets = []
return render_template("user.html", title=name, tweets=tweets, message=message)
@app.route("/update")
def update():
users = User.query.all()
for user in users:
add_or_update_user(user.name)
return render_template("base.html", title="Database has been updated!", users=User.query.all())
@app.route("/reset")
def reset():
DB.drop_all()
DB.create_all()
return render_template("base.html", title="Reset Database")
return app
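# Illustrative local run (a sketch, not part of the app; assumes DATABASE_URL
# is set to a reachable database):
#   app = create_app()
#   app.run(debug=True)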
| 33.519481 | 103 | 0.618752 | [
"MIT"
] | kvinne-anc/TwittOff | twitoff/app.py | 2,581 | Python |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import logging
import json
DEFAULT_LOG_NAME = 'azure.mgmt.common.filters'
DEFAULT_LOG_LEVEL = logging.DEBUG
DEFAULT_USER_AGENT = ''
class RequestFilter(object):
'''
Send the request.
'''
def __init__(self, session):
if session is None:
raise ValueError('session cannot be None.')
self._session = session
def send(self, prepared_request):
return self._session.send(prepared_request)
class SigningFilter(object):
'''
Sign the request.
'''
def __init__(self, creds):
if creds is None:
raise ValueError('creds cannot be None.')
self._creds = creds
def send(self, prepared_request):
self._creds.sign_request(prepared_request)
return self.next.send(prepared_request)
class UserAgentFilter(object):
'''
Add a user-agent header to the request.
'''
def __init__(self, user_agent):
if user_agent is None:
raise ValueError('user_agent cannot be None.')
self._user_agent = user_agent
def send(self, prepared_request):
prepared_request.headers['user-agent'] = self._user_agent
return self.next.send(prepared_request)
class LogFilter(object):
'''
Log the request to a standard python logger.
Example of enabling logging to the console:
import logging
logger = logging.getLogger('azure.mgmt.common.filters')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
'''
def __init__(self, name=DEFAULT_LOG_NAME, level=DEFAULT_LOG_LEVEL):
if name is None:
raise ValueError('name cannot be None.')
if level is None:
raise ValueError('level cannot be None.')
self.level = level
self.logger = logging.getLogger(name)
def send(self, prepared_request):
self._log_request(prepared_request)
response = self.next.send(prepared_request)
self._log_response(response)
return response
@staticmethod
def _headers_to_string(headers):
mask_headers = ['authorization']
headers_raw = []
for header, value in headers.items():
if header.lower() in mask_headers:
value = '*****'
headers_raw.append('%s: %s' % (header, value))
return '\n'.join(headers_raw)
@staticmethod
def _pretty_print(content):
try:
return json.dumps(
json.loads(content),
sort_keys=True,
indent=4,
separators=(',', ': '),
)
except Exception:
pass
return content
def _log_request(self, request):
if self.logger.isEnabledFor(self.level):
headers = self._headers_to_string(request.headers)
msg = ['Request: %s %s\n%s\n' % (request.method, request.url, headers)]
if request.body:
msg.append(self._pretty_print(request.body))
self.logger.log(self.level, '\n'.join(msg))
def _log_response(self, response):
if self.logger.isEnabledFor(self.level):
headers = self._headers_to_string(response.headers)
msg = ['Response: %s %s\n%s\n' % (response.status_code, response.reason, headers)]
if response.text:
msg.append(self._pretty_print(response.text))
self.logger.log(self.level, '\n'.join(msg))
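
# --- Illustrative usage sketch (not part of the original module) ---
# The filters above form a chain of responsibility: each filter's send()
# forwards to self.next.send(), and RequestFilter terminates the chain by
# actually sending the prepared request. The manual wiring below is an
# assumption for illustration; how the service client assembles the chain
# is not shown in this file.
def _example_filter_chain():
    import requests

    session = requests.Session()
    terminal = RequestFilter(session)
    log_filter = LogFilter()
    ua_filter = UserAgentFilter('my-app/1.0')

    # Wire the chain: user-agent -> logging -> actual send
    ua_filter.next = log_filter
    log_filter.next = terminal

    prepared = session.prepare_request(requests.Request('GET', 'https://example.com/'))
    return ua_filter.send(prepared)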
| 31.088889 | 94 | 0.610674 | [
"Apache-2.0"
] | Grey-Peters/IanPeters | prototype/api/FlaskApp/FlaskApp/azure_components/azure/mgmt/common/filters.py | 4,199 | Python |
import os
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import i18n
import requests
import yaml
from . import config, frozen_utils, os_utils, print_utils
# The URL to the docker-compose.yml
BRAINFRAME_DOCKER_COMPOSE_URL = "https://{subdomain}aotu.ai/releases/brainframe/{version}/docker-compose.yml"
# The URL to the latest tag, which is just a file containing the latest version
# as a string
BRAINFRAME_LATEST_TAG_URL = (
"https://{subdomain}aotu.ai/releases/brainframe/latest"
)
def assert_installed(install_path: Path) -> None:
compose_path = install_path / "docker-compose.yml"
if not compose_path.is_file():
print_utils.fail_translate(
"general.brainframe-must-be-installed",
install_env_var=config.install_path.name,
)
def run(install_path: Path, commands: List[str]) -> None:
_assert_has_docker_permissions()
compose_path = install_path / "docker-compose.yml"
if frozen_utils.is_frozen():
# Rely on the system's Docker Compose, since Compose can't be easily embedded
# into a PyInstaller executable
full_command = ["docker-compose"]
else:
# Use the included Docker Compose
full_command = [
sys.executable,
"-m",
"compose",
]
full_command += [
"--file",
str(compose_path),
]
# Provide the override file if it exists
compose_override_path = install_path / "docker-compose.override.yml"
if compose_override_path.is_file():
full_command += ["--file", str(compose_override_path)]
# Provide the .env file if it exists
env_path = install_path / ".env"
if env_path.is_file():
full_command += ["--env-file", str(env_path)]
os_utils.run(full_command + commands)
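
# --- Illustrative usage sketch (the install path is an assumption) ---
# run() builds a docker-compose invocation against the docker-compose.yml in
# the install path, adding docker-compose.override.yml and .env only when
# those files exist next to it.
def _example_run() -> None:
    install_path = Path("/usr/local/share/brainframe")
    assert_installed(install_path)
    # Equivalent to: docker-compose --file <install>/docker-compose.yml up -d
    run(install_path, ["up", "-d"])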
def download(target: Path, version: str = "latest") -> None:
_assert_has_write_permissions(target.parent)
if version == "latest":
version = get_latest_version()
credentials = config.staging_credentials()
url = BRAINFRAME_DOCKER_COMPOSE_URL.format(
subdomain="staging." if config.is_staging.value else "",
version=version,
)
response = requests.get(url, auth=credentials, stream=True)
if not response.ok:
print_utils.fail_translate(
"general.error-downloading-docker-compose",
status_code=response.status_code,
error_message=response.text,
)
target.write_text(response.text)
if os_utils.is_root():
# Fix the permissions of the docker-compose.yml so that the BrainFrame
# group can edit it
os_utils.give_brainframe_group_rw_access([target])
def get_latest_version() -> str:
"""
:return: The latest available version in the format "vX.Y.Z"
"""
# Add the flags to authenticate with staging if the user wants to download
# from there
subdomain = "staging." if config.is_staging.value else ""
credentials = config.staging_credentials()
# Check what the latest version is
url = BRAINFRAME_LATEST_TAG_URL.format(subdomain=subdomain)
response = requests.get(url, auth=credentials)
return response.text
def check_existing_version(install_path: Path) -> str:
compose_path = install_path / "docker-compose.yml"
compose = yaml.load(compose_path.read_text(), Loader=yaml.SafeLoader)
version = compose["services"]["core"]["image"].split(":")[-1]
version = "v" + version
return version
def _assert_has_docker_permissions() -> None:
"""Fails if the user does not have permissions to interact with Docker"""
if not (os_utils.is_root() or os_utils.currently_in_group("docker")):
error_message = (
i18n.t("general.docker-bad-permissions")
+ "\n"
+ _group_recommendation_message("docker")
)
print_utils.fail(error_message)
def _assert_has_write_permissions(path: Path) -> None:
"""Fails if the user does not have write access to the given path."""
if os.access(path, os.W_OK):
return
error_message = i18n.t("general.file-bad-write-permissions", path=path)
error_message += "\n"
if path.stat().st_gid == os_utils.BRAINFRAME_GROUP_ID:
error_message += " " + _group_recommendation_message("brainframe")
else:
error_message += " " + i18n.t(
"general.unexpected-group-for-file", path=path, group="brainframe"
)
print_utils.fail(error_message)
def _group_recommendation_message(group: str) -> str:
if os_utils.added_to_group("brainframe"):
# The user is in the group, they just need to restart
return i18n.t("general.restart-for-group-access", group=group)
else:
# The user is not in the group, so they need to either add
# themselves or use sudo
return i18n.t("general.retry-as-root-or-group", group=group)
| 31.716129 | 109 | 0.672091 | [
"BSD-3-Clause"
] | aotuai/brainframe-cli | brainframe/cli/docker_compose.py | 4,916 | Python |
#!/usr/bin/env python3
def main():
pass
if __name__ == '__main__':
main()
| 8.6 | 26 | 0.569767 | [
"MIT"
] | reireias/dotfiles | .vim/template/python/base-atcoder.py | 86 | Python |
# author : 陈熙
# encoding:utf-8
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
class SendEmail:
    sender = '[email protected]'  # '[email protected]'

    def __init__(self, table, rcpt):
        self.table = table
        self.rcpt = rcpt
        # Build the message per instance so repeated use does not pile up headers
        self.msg = MIMEMultipart('alternative')
        self.msg['Subject'] = Header("长春大学成绩通知,请勿回复", "utf-8")  # "Changchun University grade notice, do not reply"
        self.msg['From'] = r"%s<[email protected]>" % Header("www.a-tom.win", "utf-8")
        self.msg['To'] = self.rcpt
        html_part = MIMEText(self.table, 'html')
        html_part.set_charset('gbk')
        self.msg.attach(html_part)

    def send(self):
        try:
            s = smtplib.SMTP('smtp.139.com')
            s.login('atomuser', '849801576')
            s.sendmail(SendEmail.sender, self.rcpt, self.msg.as_string())
            return '邮件发送成功,请登录邮箱查收...'  # "Email sent; please check your inbox"
        except Exception:
            return '邮件发送失败... '  # "Sending the email failed"

    def __del__(self):
        pass
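
# --- Illustrative usage sketch (recipient address and table content are assumptions) ---
def _example_send_email():
    html_table = "<table><tr><td>Course</td><td>Score</td></tr></table>"
    mailer = SendEmail(html_table, "[email protected]")
    return mailer.send()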
| 27.157895 | 77 | 0.596899 | [
"MPL-2.0"
] | atomchan/CCUScore | ccuemail.py | 1,100 | Python |
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
"""Test update ranges on DB using Click testing
https://click.palletsprojects.com/en/7.x/testing/
"""
from datacube_ows.update_ranges_impl import main
def test_updates_ranges_schema(runner, role_name):
result = runner.invoke(main, ["--schema", "--role", role_name])
assert "Cannot find SQL resource" not in result.output
assert result.exit_code == 0
def test_update_ranges_views(runner):
result = runner.invoke(main, ["--views"])
assert "Cannot find SQL resource" not in result.output
assert result.exit_code == 0
def test_update_version(runner):
result = runner.invoke(main, ["--version"])
assert "Open Data Cube Open Web Services (datacube-ows) version" in result.output
assert result.exit_code == 0
def test_update_ranges_product(runner, product_name):
result = runner.invoke(main, [product_name])
assert "ERROR" not in result.output
assert result.exit_code == 0
def test_update_ranges_bad_product(runner, product_name):
result = runner.invoke(main, ["not_a_real_product_name"])
assert "not_a_real_product_name" in result.output
assert "Unrecognised product name" in result.output
assert result.exit_code == 1
def test_update_ranges(runner):
result = runner.invoke(main)
assert "ERROR" not in result.output
assert result.exit_code == 0
def test_update_ranges_misuse_cases(runner, role_name, product_name):
result = runner.invoke(main, ["--schema"])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--role", role_name])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--views", product_name])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--schema", product_name])
assert "Sorry" in result.output
assert result.exit_code == 1
| 32.476923 | 85 | 0.721933 | [
"Apache-2.0"
] | FlexiGroBots-H2020/datacube-ows | integration_tests/test_update_ranges.py | 2,111 | Python |
n = int(input('digite um numero para metros'))  # "enter a number in metres"
print('o valor {} metros, vale {} em centimetros, e vale {} milimetros'.format(n, n*100, n*1000))  # "{} metres equals {} centimetres and {} millimetres"
| 48.333333 | 97 | 0.675862 | [
"MIT"
] | KamiAono/CursoPython | Conversor_metros.py | 145 | Python |
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): [description]
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be access by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments and expose variables to be explosed to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
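
# --- Illustrative usage sketch (not part of the original module) ---
# Configure rotating file logging (plus console output when verbose) under
# the default ~/.c8ylp/ log directory before starting the proxy.
def _example_configure_logger() -> logging.Logger:
    logger = configure_logger(CliLogger.log_path(), verbose=True)
    logger.info("local proxy logging configured")
    return logger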
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry the login so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for "%s" was not found, or none has protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so the user's subcommand can run in the foreground
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including env
    # variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for reference to later
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
        # Plugins start the proxy in a background thread, so skip the connection
        # hints there and let the plugins produce their own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
| 30.094972 | 118 | 0.622424 | [
"ECL-2.0",
"Apache-2.0"
] | SoftwareAG/cumulocity-remote-access-local-proxy | c8ylp/cli/core.py | 21,548 | Python |
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios files
    filetype is one of "returns", "nfirms", "size"
    weighting is one of "ew", "vw"
    n_inds is 30 or 49
    """
    if filetype == "returns":
        name = f"{weighting}_rets"
        divisor = 100
    elif filetype == "nfirms":
        name = "nfirms"
        divisor = 1
    elif filetype == "size":
        name = "size"
        divisor = 1
    else:
        raise ValueError("filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
#else
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
Load the 30 industry portfolio data and derive the returns of a capweighted total market index
"""
ind_capweight = get_ind_market_caps(n_inds=n_inds)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
"""
    Annualizes a set of returns, inferring the elapsed time
    from the first and last valid dates in the index
"""
r_valid = r[(r!=0) & (r.notnull())]
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
except:
years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
    Annualizes the vol of a set of returns, inferring the elapsed
    time from the first and last valid dates in the index
"""
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
except:
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
except:
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
Returns the Parametric Gauusian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
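
# --- Illustrative example (purely synthetic, fat-tailed returns) ---
# The Cornish-Fisher adjustment shifts the Gaussian z-score using the observed
# skewness and kurtosis, so the modified VaR differs from the plain Gaussian VaR.
def _example_var_gaussian():
    np.random.seed(0)
    r = pd.Series(np.random.standard_t(df=4, size=1000) * 0.01)
    return {
        "gaussian": var_gaussian(r, level=5),
        "cornish_fisher": var_gaussian(r, level=5, modified=True),
        "historic": var_historic(r, level=5),
    }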
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
weights are a numpy array or N x 1 maxtrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
# add EW
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
backtest_result = {
"Wealth": account_history,
"Risky Wealth": risky_wealth,
"Risk Budget": cushion_history,
"Risky Allocation": risky_w_history,
"m": m,
"start": start,
"floor": floor,
"risky_r":risky_r,
"safe_r": safe_r,
"drawdown": drawdown,
"peak": peak_history,
"floor": floorval_history
}
return backtest_result
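
# --- Illustrative example (simulated data; parameter values are assumptions) ---
# A CPPI backtest on simulated risky returns: each step the multiplier m scales
# the cushion above the floor into the risky allocation.
def _example_run_cppi():
    risky_r = pd.DataFrame(gbm(n_years=5, n_scenarios=3, prices=False)[1:])
    result = run_cppi(risky_r, m=3, start=1000, floor=0.8, riskfree_rate=0.03)
    return result["Wealth"].iloc[-1]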
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
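
# --- Illustrative example (synthetic monthly returns, not the EDHEC data) ---
# summary_stats aggregates the per-column risk and return measures defined above.
def _example_summary_stats():
    np.random.seed(0)
    dates = pd.date_range("2000-01-31", periods=120, freq="M")
    r = pd.DataFrame(np.random.normal(0.008, 0.04, size=(120, 2)),
                     index=dates, columns=["FundA", "FundB"])
    return summary_stats(r, riskfree_rate=0.03)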
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
:param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
:param mu: Annualized Drift, e.g. Market Return
:param sigma: Annualized Volatility
:param steps_per_year: granularity of the simulation
:param s_0: initial value
    :return: prices (or returns if prices=False) with n_scenarios columns and n_years*steps_per_year+1 rows
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
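
# --- Illustrative example (all parameter values are assumptions) ---
# Simulate ten years of monthly price paths and inspect the terminal values.
def _example_gbm():
    prices = gbm(n_years=10, n_scenarios=100, mu=0.07, sigma=0.15, s_0=100.0)
    return prices.iloc[-1].describe()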
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
returns an object of type statsmodel's RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
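
# --- Illustrative example (synthetic factors, not the Ken French files) ---
# Decompose a return series into two made-up factors plus alpha and read the
# fitted coefficients off the statsmodels results object.
def _example_regress():
    np.random.seed(0)
    factors = pd.DataFrame({
        "Mkt-RF": np.random.normal(0.01, 0.04, 120),
        "SMB": np.random.normal(0.00, 0.02, 120),
    })
    r = 0.9 * factors["Mkt-RF"] + 0.2 * factors["SMB"] + np.random.normal(0, 0.01, 120)
    lm = regress(r, factors)
    return lm.params, lm.rsquared_adj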
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
Returns the optimal weights that minimizes the Tracking error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
If supplied a set of capweights and a capweight tether, it is applied and reweighted
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
returns = (weights * r).sum(axis="columns", min_count=1) #mincount is to generate NAs if all inputs are NAs
return returns
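
# --- Illustrative example (random returns; column names are assumptions) ---
# Backtest the equal-weight and GMV weighting schemes side by side.
def _example_backtest_ws():
    np.random.seed(0)
    r = pd.DataFrame(np.random.normal(0.01, 0.05, size=(120, 4)),
                     columns=["A", "B", "C", "D"])
    ew_rets = backtest_ws(r, estimation_window=36, weighting=weight_ew)
    gmv_rets = backtest_ws(r, estimation_window=36, weighting=weight_gmv,
                           cov_estimator=sample_cov)
    return pd.DataFrame({"EW": ew_rets, "GMV": gmv_rets})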
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
"""
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
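
# --- Illustrative example (a made-up 3-asset covariance matrix) ---
# ERC weights equalize each asset's contribution to total portfolio risk,
# which risk_contribution() lets us verify directly.
def _example_equal_risk_contributions():
    cov = pd.DataFrame([[0.04, 0.006, 0.01],
                        [0.006, 0.09, 0.02],
                        [0.01, 0.02, 0.16]],
                       index=["A", "B", "C"], columns=["A", "B", "C"])
    w = equal_risk_contributions(cov)
    return w, risk_contribution(w, cov)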
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
returns a P x P DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
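
# --- Illustrative example (two hypothetical assets and one relative view) ---
# Reverse-engineer the equilibrium returns from the prior weights, then tilt
# them with a single "A outperforms B by 2%" view through the master formula.
# All numbers are assumptions for illustration.
def _example_black_litterman():
    tickers = ["A", "B"]
    sigma_prior = pd.DataFrame([[0.04, 0.01],
                                [0.01, 0.09]], index=tickers, columns=tickers)
    w_prior = pd.Series([0.6, 0.4], index=tickers)
    q = pd.Series([0.02])                      # the view: +2%
    p = pd.DataFrame([{"A": 1.0, "B": -1.0}])  # pick matrix linking the view to assets
    mu_bl, sigma_bl = bl(w_prior, sigma_prior, p, q, delta=2.5, tau=0.02)
    return mu_bl, sigma_bl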
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
w = w/sum(w) # fix: this assumes all w is +ve
return w
| 35.79645 | 161 | 0.647811 | [
"MIT"
] | jaimeaguilera/Investing-projects | kit.py | 30,248 | Python |
"""
_SubscriptionList_
Module with data structures to handle PhEDEx subscriptions
in bulk.
"""
import logging
from WMCore.WMException import WMException
PhEDEx_VALID_SUBSCRIPTION_PRIORITIES = ['low', 'normal', 'high', 'reserved']
class PhEDExSubscriptionException(WMException):
"""
_PhEDExSubscriptionException_
Exception class for the phedex subscription
"""
pass
class PhEDExSubscription(object):
"""
_PhEDExSubscription_
Data structure which contains PHEDEx fields for
PhEDEx subscription data service
"""
def __init__(self, datasetPathList, nodeList, group, level = 'dataset',
priority = 'normal', move = 'n', static = 'n', custodial = 'n',
request_only = 'y', blocks = None, subscriptionId = -1, comments=""):
"""
Initialize PhEDEx subscription with default value
"""
if isinstance(datasetPathList, basestring):
datasetPathList = [datasetPathList]
if isinstance(nodeList, basestring):
nodeList = [nodeList]
self.datasetPaths = set(datasetPathList)
self.nodes = set(nodeList)
self.level = level.lower()
self.priority = priority.lower()
self.move = move.lower()
self.static = static.lower()
self.group = group
self.custodial = custodial.lower()
self.request_only = request_only.lower()
self.requesterID = None
self.status = "New"
self.comments = comments
# Subscription id for internal accounting
self.subscriptionIds = set([subscriptionId])
# Optional blocks for non-dataset subscriptions
self.blocks = blocks
try:
# Validation checks on the subscription
for option in (self.static, self.custodial, self.request_only, self.move):
assert option in ('y', 'n')
assert self.priority in PhEDEx_VALID_SUBSCRIPTION_PRIORITIES
assert self.level in ('dataset', 'block')
if self.level == 'block':
assert self.blocks is not None
except AssertionError:
msg = "The subscription is not a valid PhEDEx subscription.\n"
msg += "Check the options for this subscription: \n"
msg += "level: %s\n" % self.level
msg += "priority: %s\n" % self.priority
msg += "static: %s\n" % self.static
msg += "move: %s\n" % self.move
msg += "custodial: %s\n" % self.custodial
msg += "blocks: %s\n" % str(self.blocks)
raise PhEDExSubscriptionException(msg)
def __str__(self):
"""
Write out useful information for this object
        :return: string with the main subscription fields
"""
res = {'datasetPaths': self.datasetPaths, 'nodes': self.nodes,
'priority': self.priority, 'move': self.move,
'group': self.group, 'custodial': self.custodial,
'request_only': self.request_only, 'blocks': self.blocks}
return str(res)
def isEqualOptions(self, subscription):
return (self.level == subscription.level
and self.priority == subscription.priority
and self.request_only == subscription.request_only
and self.custodial == subscription.custodial
and self.group == subscription.group
and self.move == subscription.move
and self.static == subscription.static)
def isEqualDatasetPaths(self, subscription):
return (self.datasetPaths == subscription.datasetPaths
and self.isEqualOptions(subscription))
def isEqualNode(self, subscription):
return (self.nodes == subscription.nodes
and self.isEqualOptions(subscription))
def addDatasetPaths(self, subscription):
if self.requesterID != None:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
self.datasetPaths = self.datasetPaths.union(subscription.datasetPaths)
self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)
def addNodes(self, subscription):
if self.requesterID != None:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
self.nodes = self.nodes.union(subscription.nodes)
self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)
def getDatasetPaths(self):
return list(self.datasetPaths)
def getSubscriptionIds(self):
return list(self.subscriptionIds)
def getDatasetsAndBlocks(self):
"""
_getDatasetsAndBlocks_
Get the block structure
with datasets and blocks
"""
return self.blocks
def getNodes(self):
return list(self.nodes)
def getRequesterID(self):
return self.requesterID
def setRequesterID(self, requesterId):
        if self.requesterID is None:
self.requesterID = requesterId
else:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
def matchesExistingTransferRequest(self, phedexDataSvc):
"""
_matchesExistingTransferRequest_
Check the given phedex data service to verify if an unapproved
transfer request equal to this subscription is already in the system.
"""
if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
msg = "matchesExistingTransferRequest can only run in single node/dataset subscriptions"
raise PhEDExSubscriptionException(msg)
if self.level != 'dataset':
msg = "matchesExistingTransferRequest is only supported by dataset subscriptions"
raise PhEDExSubscriptionException(msg)
node = next(iter(self.nodes))
dataset = next(iter(self.datasetPaths))
# Get the unapproved requests involving the node and dataset in this subscription
existingRequests = phedexDataSvc.getRequestList(dataset = dataset,
node = node,
decision = 'pending')['phedex']['request']
for request in existingRequests:
# Get the detailed information in the request
requestId = request['id']
requestInfo = phedexDataSvc.getTransferRequests(request = requestId)['phedex']['request']
if not requestInfo:
logging.error("Transfer request %s doesn't exist in PhEDEx", requestId)
continue # Strange, but let it go.
requestInfo = requestInfo[0] # It's a singleton
# Make sure that the node is in the destinations
destinations = requestInfo['destinations']['node']
for nodeInfo in destinations:
if nodeInfo['name'] == node:
break
else:
continue
# Create a subscription with this info
phedexRequest = PhEDExSubscription(self.datasetPaths, self.nodes,
self.group, self.level, requestInfo['priority'],
requestInfo['move'], requestInfo['static'],
requestInfo['custodial'], self.request_only)
if self.isEqualOptions(phedexRequest):
return True
return False
def matchesExistingSubscription(self, phedexDataSvc):
"""
_matchesExistingSubscription_
Check the given phedex data service to verify if a PhEDEx subscription
equal to this subscription is already in the system.
"""
if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
msg = "matchesExistingSubscription can only run in single node/dataset subscriptions"
raise PhEDExSubscriptionException(msg)
if self.level != 'dataset':
msg = "matchesExistingSubscription is only supported by dataset subscriptions"
raise PhEDExSubscriptionException(msg)
node = next(iter(self.nodes))
dataset = next(iter(self.datasetPaths))
# Check if the dataset has a subscription the given node
existingSubscription = phedexDataSvc.subscriptions(dataset = dataset,
node = node)['phedex']['dataset']
if len(existingSubscription) < 1:
# No subscriptions
return False
datasetInfo = existingSubscription[0]
for subscriptionInfo in datasetInfo['subscription']:
# Check that the node in the subscription matches the current node
if node != subscriptionInfo['node']:
continue
# Create a subscription with the info
phedexSub = PhEDExSubscription(self.datasetPaths, self.nodes,
self.group, subscriptionInfo['level'],
subscriptionInfo['priority'], subscriptionInfo['move'],
self.static, subscriptionInfo['custodial'],
self.request_only)
if self.isEqualOptions(phedexSub):
return True
return False
class SubscriptionList(object):
"""
_SubscriptionList_
Class represents collection of subscription.
This organizes the subscriptions in a way to minimize their number.
"""
def __init__(self):
self._subList = []
def addSubscription(self, subObj):
"""
_addSubscription_
Add a new subscription to the subscription policy.
If the same subscription key exist just add the node list
"""
for subscription in self._subList:
if subscription.isEqualOptions(subObj):
if subscription.isEqualNode(subObj):
subscription.addDatasetPaths(subObj)
return
self._subList.append(subObj)
return
def compact(self):
"""
_compact_
Compact the subscription list by aggregating the subscriptions where the nodes
share a list of dataset paths.
"""
# Bag the subscriptions, keep indexes of bagged items to
# avoid modifying the list in place or copying the list
bags = []
baggedIndexes = set()
for i, subscriptionA in enumerate(self._subList):
if i in baggedIndexes:
continue
bags.append([subscriptionA])
for j, subscriptionB in enumerate(self._subList[i + 1:], i + 1):
if j in baggedIndexes:
continue
if subscriptionA.isEqualOptions(subscriptionB) and \
subscriptionA.isEqualDatasetPaths(subscriptionB):
bags[-1].append(subscriptionB)
baggedIndexes.add(j)
# Aggregate the subscriptions in the bags
newSubList = []
for bag in bags:
anchorSubscription = bag[0]
for subscription in bag[1:]:
anchorSubscription.addNodes(subscription)
newSubList.append(anchorSubscription)
self._subList = newSubList
def getSubscriptionList(self):
return self._subList
| 38.821192 | 101 | 0.594336 | ["Apache-2.0"] | cbbrainerd/WMCore | src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py | 11,724 | Python |
"""
Functions for signals and positions created within this package.
Copyright 2021 InferStat Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from infertrade.PandasEnum import PandasEnum
from infertrade.algos.community.allocations import scikit_allocation_factory, infertrade_export_allocations
from infertrade.algos.community.signals import normalised_close, scikit_signal_factory, infertrade_export_signals
# A dictionary providing the list of community signals and trading strategies.
infertrade_export = {
"signal": infertrade_export_signals,
PandasEnum.ALLOCATION.value: infertrade_export_allocations,
}
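# Illustrative lookup (key names below are assumptions, not guaranteed to exist):
#   infertrade_export["signal"] maps signal names to their calculation functions, and
#   infertrade_export[PandasEnum.ALLOCATION.value] maps strategy names to allocation functions.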
| 39.464286 | 113 | 0.819005 | ["Apache-2.0"] | holderfolyf/infertrade | infertrade/algos/community/__init__.py | 1,105 | Python |
import urllib.request, json
from .models import Sources, Articles
from datetime import datetime
#Getting api key
api_key = None
#Getting the news base url
# NEWS_API_KEY = None
# NEWS_API_BASE_URL = None
ARTICLE = None
def configure_request(app):
global api_key,NEWS_API_BASE_URL,NEWS_API_KEY,ARTICLE
api_key = app.config['NEWS_API_KEY']
ARTICLE = app.config['ARTICLE']
NEWS_API_BASE_URL = app.config['NEWS_API_BASE_URL']
NEWS_API_KEY = app.config['NEWS_API_KEY']
def get_source(category):
'''
function that gets the json response to our url request
'''
get_source_url = NEWS_API_BASE_URL.format(category,api_key)
print(get_source_url)
with urllib.request.urlopen(get_source_url) as url:
get_source_data = url.read()
get_source_response = json.loads(get_source_data)
sources_result = None
if get_source_response['sources']:
sources_results_list = get_source_response['sources']
sources_result = process_sources(sources_results_list)
print(sources_result)
return sources_result
def process_sources(sources_list):
'''
Function that checks the news results and turn them into objects
Args:
sources_list: A list of dictionaries that contain sources details
'''
sources_result = []
for source_item in sources_list:
author = source_item.get('author')
title = source_item.get('title')
imageurl = source_item.get('urltoimage')
description = source_item.get('description')
url = source_item.get('url')
id = source_item.get('id')
sources_object = Sources(author, title,imageurl,description,url,id)
sources_result.append(sources_object)
return sources_result
def get_articles(id):
'''
Function that processes the articles and returns a list of articles objects
'''
get_articles_url = ARTICLE.format(id,api_key)
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
article_data = url.read()
articles_response = json.loads(article_data)
articles_object = None
if articles_response['articles']:
response_list= articles_response['articles']
articles_object = process_articles(response_list)
return articles_object
def process_articles(articles_list):
'''
function that checks the articles and processes them into instances
'''
articles_object = []
for article_item in articles_list:
author = article_item.get('name')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
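        # Only keep articles that include an image URL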
if image:
articles_result = Articles(author,title,description,url,image,date)
articles_object.append(articles_result)
return articles_object
| 30.466019 | 79 | 0.658062 | ["MIT"] | ClarisseU/newsHighlight | app/requests.py | 3,138 | Python |
class Const:
"""
    Constant: an attribute cannot be rebound once set.
"""
    class ConstError(TypeError):
        pass
    def __setattr__(self, name, value):
        if name in self.__dict__:
            raise self.ConstError("Can't rebind const (%s)" % name)
        self.__dict__[name] = value
LAYOUT = Const()
"""
Layout
"""
LAYOUT.SCREEN_WIDTH = 500
LAYOUT.SCREEN_HEIGHT = 600
LAYOUT.SIZE = 4
LAYOUT.TERRAIN_X = 50
LAYOUT.TERRAIN_Y = 20
LAYOUT.TILE_WIDTH = 100
LAYOUT.TILE_HEIGHT = 90
LAYOUT.SCOREBOARD_X = 50
LAYOUT.SCOREBOARD_Y = 400
LAYOUT.POPUP_X = 100
LAYOUT.POPUP_Y = 400
LAYOUT.POPUP_WIDTH = 300
LAYOUT.POPUP_HEIGHT = 200
IMAGE = Const()
"""
Images
"""
IMAGE.TILE = "assets/tile.png"  # floor tile
IMAGE.MIST = "assets/mist.png"  # fog of war
IMAGE.HERO = "assets/hero.png"  # hero
IMAGE.MONSTER = "assets/monster.png"  # monster
IMAGE.PIT = "assets/pit.png"  # pit
IMAGE.GOLD = "assets/gold.png"  # gold
IMAGE.BREEZE = "assets/breeze.png"  # breeze
IMAGE.STRENCH = "assets/strench.png"  # stench
EVENT = Const()
"""
Events
"""
EVENT.GAME_OVER = "gameOver"  # game over
EVENT.GAME_CLEAR = "gameClear"  # game cleared
EVENT.MONSTER_DEAD = "monsterDead"  # monster died
EVENT.HERO_WALK = "heroWalk"  # hero walks
EVENT.HERO_ATTACK = "heroAttack"  # hero attacks
EVENT.DANGER = "danger"  # danger encountered
ENCOUNTER = Const()
"""
Encounters
"""
ENCOUNTER.MONSTER = 21  # monster
ENCOUNTER.PIT = 22  # pit
ENCOUNTER.GOLD = 10  # gold
SCORE = Const()
"""
Score
"""
SCORE.WALK = -1  # walking
SCORE.WIN = 1000  # win
SCORE.LOSE = -1000  # lose
SCORE.ATTACK = -10  # attack
| 20.088235 | 60 | 0.682284 | ["Apache-2.0"] | thales-ucas/wumpus | src/const.py | 1,502 | Python |
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pedrec.models.constants.skeleton_pedrec import SKELETON_PEDREC, SKELETON_PEDREC_JOINT_COLORS, SKELETON_PEDREC_LIMB_COLORS
from pedrec.visualizers.visualization_helper_3d import draw_origin_3d, draw_grid_3d
def add_skeleton_3d_to_axes(ax: Axes3D, skeleton_3d: np.ndarray, size: float = 2, min_score: float = 0.3):
# Joints
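    # Note: column 1 (y) is drawn on the vertical plot axis and column 2 (z) on the
    # horizontal depth axis, i.e. the y/z components are swapped for display.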
xs = skeleton_3d[:, 0]
ys = skeleton_3d[:, 2]
zs = skeleton_3d[:, 1]
colors = []
for idx, joint in enumerate(skeleton_3d):
if joint[3] < min_score: # score
colors.append([0, 0, 0, 0])
else:
colors.append(SKELETON_PEDREC_JOINT_COLORS[idx].rgba_float_list)
ax.scatter(xs, ys, zs, c=colors, s=size)
# Limbs
for idx, pair in enumerate(SKELETON_PEDREC):
if (skeleton_3d[pair[0:2], 3] >= min_score).all():
ax.plot(skeleton_3d[pair[0:2], 0], skeleton_3d[pair[0:2], 2], skeleton_3d[pair[0:2], 1], linewidth=size, c=SKELETON_PEDREC_LIMB_COLORS[idx].rgba_float_list)
def get_skeleton_3d_figure(skeleton_3d: np.ndarray):
# Preparation
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
draw_grid_3d(ax)
draw_origin_3d(ax)
add_skeleton_3d_to_axes(ax, skeleton_3d)
return fig, ax
def plot_skeleton_3d(skeleton_3d: np.ndarray):
fig, ax = get_skeleton_3d_figure(skeleton_3d)
plt.show()
| 35.146341 | 168 | 0.704372 | ["MIT"] | noboevbo/PedRec | pedrec/visualizers/skeleton_3d_visualizer.py | 1,441 | Python |
from ctypes import (
Structure,
Union,
c_char,
c_double,
c_int,
c_long,
c_short,
c_ubyte,
c_uint,
c_ulong,
c_ushort,
)
from . import timespec
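# The classes below are ctypes mirrors of the GSF (Generic Sensor Format)
# sensor-specific C structures; each Structure corresponds to one sonar model's
# per-ping record layout.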
class c_gsfSeaBeamSpecific(Structure):
_fields_ = [("EclipseTime", c_ushort)]
class c_gsfEM100Specific(Structure):
_fields_ = [
("ship_pitch", c_double),
("transducer_pitch", c_double),
("mode", c_int),
("power", c_int),
("attenuation", c_int),
("tvg", c_int),
("pulse_length", c_int),
("counter", c_int),
]
class c_gsfEM121ASpecific(Structure):
_fields_ = [
("ping_number", c_int),
("mode", c_int),
("valid_beams", c_int),
("pulse_length", c_int),
("beam_width", c_int),
("tx_power", c_int),
("tx_status", c_int),
("rx_status", c_int),
("surface_velocity", c_double),
]
class c_gsfSeaBatSpecific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("sonar_range", c_int),
("transmit_power", c_int),
("receive_gain", c_int),
]
class c_gsfEM950Specific(Structure):
_fields_ = [
("ping_number", c_int),
("mode", c_int),
("ping_quality", c_int),
("ship_pitch", c_double),
("transducer_pitch", c_double),
("surface_velocity", c_double),
]
SEAMAP_DOUBLE_ARRAY_OF_2 = c_double * 2
class c_gsfSeamapSpecific(Structure):
_fields_ = [
("portTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
("stbdTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
("portGain", c_double),
("stbdGain", c_double),
("portPulseLength", c_double),
("stbdPulseLength", c_double),
("pressureDepth", c_double),
("altitude", c_double),
("temperature", c_double),
]
class c_gsfTypeIIISpecific(Structure):
_fields_ = [
("leftmost_beam", c_ushort),
("rightmost_beam", c_ushort),
("total_beams", c_ushort),
("nav_mode", c_ushort),
("ping_number", c_ushort),
("mission_number", c_ushort),
]
class c_gsfCmpSassSpecific(Structure):
_fields_ = [("lfreq", c_double), ("lntens", c_double)]
class c_gsfSBAmpSpecific(Structure):
_fields_ = [
("hour", c_ushort),
("minute", c_ushort),
("second", c_ushort),
("hundredths", c_ushort),
("block_number", c_uint),
("avg_gate_depth", c_short),
]
SEA_BAT_CHAR_ARRAY_OF_4 = c_char * 4
class c_gsfSeaBatIISpecific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("sonar_range", c_int),
("transmit_power", c_int),
("receive_gain", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("spare", SEA_BAT_CHAR_ARRAY_OF_4),
]
class c_gsfSeaBat8101Specific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("range", c_int),
("power", c_int),
("gain", c_int),
("pulse_width", c_int),
("tvg_spreading", c_int),
("tvg_absorption", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("projector", c_int),
("spare", SEA_BAT_CHAR_ARRAY_OF_4),
]
SEA_BEAM_ALGORITHM_ORDER = c_char * 5
SEA_BEAM_SPARE = c_char * 2
class c_gsfSeaBeam2112Specific(Structure):
_fields_ = [
("mode", c_int),
("surface_velocity", c_double),
("ssv_source", c_char),
("ping_gain", c_int),
("pulse_width", c_int),
("transmitter_attenuation", c_int),
("number_algorithms", c_int),
("algorithm_order", SEA_BEAM_ALGORITHM_ORDER),
("spare", SEA_BEAM_SPARE),
]
class c_gsfElacMkIISpecific(Structure):
_fields_ = [
("mode", c_int),
("ping_num", c_int),
("sound_vel", c_int),
("pulse_length", c_int),
("receiver_gain_stbd", c_int),
("receiver_gain_port", c_int),
("reserved", c_int),
]
class c_gsfEM3RunTime(Structure):
_fields_ = [
("model_number", c_int),
("dg_time", timespec.c_timespec),
("ping_number", c_int),
("serial_number", c_int),
("system_status", c_int),
("filter_id", c_int),
("min_depth", c_double),
("max_depth", c_double),
("absorption", c_double),
("pulse_length", c_double),
("transmit_beam_width", c_double),
("power_reduction", c_int),
("receive_beam_width", c_double),
("receive_bandwidth", c_int),
("receive_gain", c_int),
("cross_over_angle", c_int),
("ssv_source", c_int),
("swath_width", c_int),
("beam_spacing", c_int),
("coverage_sector", c_int),
("stabilization", c_int),
("port_swath_width", c_int),
("stbd_swath_width", c_int),
("port_coverage_sector", c_int),
("stbd_coverage_sector", c_int),
("hilo_freq_absorp_ratio", c_int),
("spare1", c_int),
]
EM3_RUN_TIME_2_ARRAY = c_gsfEM3RunTime * 2
class c_gsfEM3Specific(Structure):
_fields_ = [
("model_number", c_int),
("ping_number", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_beams", c_int),
("sample_rate", c_int),
("depth_difference", c_double),
("offset_multiplier", c_int),
("run_time", EM3_RUN_TIME_2_ARRAY),
]
EM3_RAW_SPARE_BYTES = c_ubyte * 16
class c_gsfEMRunTime(Structure): # 168 bytes
_fields_ = [
("model_number", c_int),
("dg_time", timespec.c_timespec),
("ping_counter", c_int),
("serial_number", c_int),
("operator_station_status", c_ubyte),
("processing_unit_status", c_ubyte),
("bsp_status", c_ubyte),
("head_transceiver_status", c_ubyte),
("mode", c_ubyte),
("filter_id", c_ubyte),
("min_depth", c_double),
("max_depth", c_double),
("absorption", c_double),
("tx_pulse_length", c_double),
("tx_beam_width", c_double),
("tx_power_re_max", c_double),
("rx_beam_width", c_double),
("rx_bandwidth", c_double),
("rx_fixed_gain", c_double),
("tvg_cross_over_angle", c_double),
("ssv_source", c_ubyte),
("max_port_swath_width", c_int),
("beam_spacing", c_ubyte),
("max_port_coverage", c_int),
("stabilization", c_ubyte),
("max_stbd_coverage", c_int),
("max_stbd_swath_width", c_int),
("durotong_speed", c_double),
("hi_low_absorption_ratio", c_double),
("tx_along_tilt", c_double),
("filter_id_2", c_ubyte),
("spare", EM3_RAW_SPARE_BYTES),
]
class c_gsfEMPUStatus(Structure): # 42 bytes
_fields_ = [
("pu_cpu_load", c_double),
("sensor_status", c_ushort),
("achieved_port_coverage", c_int),
("achieved_stbd_coverage", c_int),
("yaw_stabilization", c_double),
("spare", EM3_RAW_SPARE_BYTES),
]
class c_gsfEM3RawTxSector(Structure): # 72 bytes
_fields_ = [
("tilt_angle", c_double),
("focus_range", c_double),
("signal_length", c_double),
("transmit_delay", c_double),
("center_frequency", c_double),
("waveform_id", c_int),
("sector_number", c_int),
("signal_bandwidth", c_double),
("spare", EM3_RAW_SPARE_BYTES),
]
GSF_MAX_EM3_SECTORS = 20
EM3_RAW_SECTORS = c_gsfEM3RawTxSector * GSF_MAX_EM3_SECTORS # 1440 bytes
class c_gsfEM3RawSpecific(Structure): # 1792 bytes (1746 + 23 * 2)
_fields_ = [
("model_number", c_int),
("ping_counter", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_detections", c_int),
("sampling_frequency", c_double),
("vehicle_depth", c_double),
("depth_difference", c_double),
("offset_multiplier", c_int),
("spare_1", EM3_RAW_SPARE_BYTES),
("transmit_sectors", c_int), # 80 bytes
("sector", EM3_RAW_SECTORS), # 1520 bytes
("spare_2", EM3_RAW_SPARE_BYTES), # 1536 bytes
("run_time", c_gsfEMRunTime), # 1704 bytes
("pu_status", c_gsfEMPUStatus), # 1746 bytes
]
RESON8100_SPARE_BYTES = c_char * 2
class c_gsfReson8100Specific(Structure):
_fields_ = [
("latency", c_int),
("ping_number", c_int),
("sonar_id", c_int),
("sonar_model", c_int),
("frequency", c_int),
("surface_velocity", c_double),
("sample_rate", c_int),
("ping_rate", c_int),
("mode", c_int),
("range", c_int),
("power", c_int),
("gain", c_int),
("tvg_spreading", c_int),
("tvg_absorption", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("projector_type", c_int),
("projector_angle", c_int),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("filters_active", c_int),
("temperature", c_int),
("beam_spacing", c_double),
("spare", RESON8100_SPARE_BYTES),
]
RESON7100_RESERVED_1 = c_ubyte * 16
RESON7100_RESERVED_2 = c_char * 15
RESON7100_RESERVED_3 = c_char * 8
class c_gsfReson7100Specific(Structure):
_fields_ = [
("protocol_version", c_uint),
("device_id", c_uint),
("reserved_1", RESON7100_RESERVED_1),
("major_serial_number", c_uint),
("minor_serial_number", c_uint),
("ping_number", c_uint),
("multi_ping_seq", c_uint),
("frequency", c_double),
("sample_rate", c_double),
("receiver_bandwdth", c_double),
("tx_pulse_width", c_double),
("tx_pulse_type_id", c_uint),
("tx_pulse_envlp_id", c_uint),
("tx_pulse_envlp_param", c_double),
("tx_pulse_reserved", c_uint),
("max_ping_rate", c_double),
("ping_period", c_double),
("range", c_double),
("power", c_double),
("gain", c_double),
("control_flags", c_uint),
("projector_id", c_uint),
("projector_steer_angl_vert", c_double),
("projector_steer_angl_horz", c_double),
("projector_beam_wdth_vert", c_double),
("projector_beam_wdth_horz", c_double),
("projector_beam_focal_pt", c_double),
("projector_beam_weighting_window_type", c_uint),
("projector_beam_weighting_window_param", c_uint),
("transmit_flags", c_uint),
("hydrophone_id", c_uint),
("receiving_beam_weighting_window_type", c_uint),
("receiving_beam_weighting_window_param", c_uint),
("receive_flags", c_uint),
("receive_beam_width", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("absorption", c_double),
("sound_velocity", c_double),
("spreading", c_double),
("raw_data_from_7027", c_ubyte),
("reserved_2", RESON7100_RESERVED_2),
("sv_source", c_ubyte),
("layer_comp_flag", c_ubyte),
("reserved_3", RESON7100_RESERVED_3),
]
RESONTSERIES_RESERVED_1 = c_ubyte * 10
RESONTSERIES_RESERVED_2 = c_ubyte * 3
RESONTSERIES_RESERVED_3 = c_ubyte * 32
RESONTSERIES_RESERVED_7027 = c_ubyte * 420
RESONTSERIES_DEVICE_DESCRIPTION = c_char * 60
class c_gsfResonTSeriesSpecific(Structure):
_fields_ = [
("protocol_version", c_uint),
("device_id", c_uint),
("number_devices", c_uint),
("system_enumerator", c_ushort),
("reserved_1", RESONTSERIES_RESERVED_1),
("major_serial_number", c_uint),
("minor_serial_number", c_uint),
("ping_number", c_uint),
("multi_ping_seq", c_uint),
("frequency", c_double),
("sample_rate", c_double),
("receiver_bandwdth", c_double),
("tx_pulse_width", c_double),
("tx_pulse_type_id", c_uint),
("tx_pulse_envlp_id", c_uint),
("tx_pulse_envlp_param", c_double),
("tx_pulse_mode", c_ushort),
("tx_pulse_reserved", c_ushort),
("max_ping_rate", c_double),
("ping_period", c_double),
("range", c_double),
("power", c_double),
("gain", c_double),
("control_flags", c_uint),
("projector_id", c_uint),
("projector_steer_angl_vert", c_double),
("projector_steer_angl_horz", c_double),
("projector_beam_wdth_vert", c_double),
("projector_beam_wdth_horz", c_double),
("projector_beam_focal_pt", c_double),
("projector_beam_weighting_window_type", c_uint),
("projector_beam_weighting_window_param", c_double),
("transmit_flags", c_uint),
("hydrophone_id", c_uint),
("receiving_beam_weighting_window_type", c_uint),
("receiving_beam_weighting_window_param", c_double),
("receive_flags", c_uint),
("receive_beam_width", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("absorption", c_double),
("sound_velocity", c_double),
("sv_source", c_ubyte),
("spreading", c_double),
("beam_spacing_mode", c_ushort),
("sonar_source_mode", c_ushort),
("coverage_mode", c_ubyte),
("coverage_angle", c_double),
("horizontal_receiver_steering_angle", c_double),
("reserved_2", RESONTSERIES_RESERVED_2),
("uncertainty_type", c_uint),
("transmitter_steering_angle", c_double),
("applied_roll", c_double),
("detection_algorithm", c_ushort),
("detection_flags", c_uint),
("device_description", RESONTSERIES_DEVICE_DESCRIPTION),
("reserved_7027", RESONTSERIES_RESERVED_7027),
("reserved_3", RESONTSERIES_RESERVED_3),
]
EM4_SPARE_BYTES = c_ubyte * 16
class c_gsfEM4TxSector(Structure):
_fields_ = [
("tilt_angle", c_double),
("focus_range", c_double),
("signal_length", c_double),
("transmit_delay", c_double),
("center_frequency", c_double),
("mean_absorption", c_double),
("waveform_id", c_int),
("sector_number", c_int),
("signal_bandwidth", c_double),
("spare", EM4_SPARE_BYTES),
]
EM4_SECTORS = c_gsfEM4TxSector * 9
class c_gsfEM4Specific(Structure):
_fields_ = [
("model_number", c_int),
("ping_counter", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_detections", c_int),
("sampling_frequency", c_double),
("doppler_corr_scale", c_uint),
("vehicle_depth", c_double),
("spare_1", EM4_SPARE_BYTES),
("transmit_sectors", c_int),
("sector", EM4_SECTORS),
("spare_2", EM4_SPARE_BYTES),
("run_time", c_gsfEMRunTime),
("pu_status", c_gsfEMPUStatus),
]
GEOSWATH_SPARE_BYTES = c_char * 32
class c_gsfGeoSwathPlusSpecific(Structure):
_fields_ = [
("data_source", c_int),
("side", c_int),
("model_number", c_int),
("frequency", c_double),
("echosounder_type", c_int),
("ping_number", c_long),
("num_nav_samples", c_int),
("num_attitude_samples", c_int),
("num_heading_samples", c_int),
("num_miniSVS_samples", c_int),
("num_echosounder_samples", c_int),
("num_raa_samples", c_int),
("mean_sv", c_double),
("surface_velocity", c_double),
("valid_beams", c_int),
("sample_rate", c_double),
("pulse_length", c_double),
("ping_length", c_int),
("transmit_power", c_int),
("sidescan_gain_channel", c_int),
("stabilization", c_int),
("gps_quality", c_int),
("range_uncertainty", c_double),
("angle_uncertainty", c_double),
("spare", GEOSWATH_SPARE_BYTES),
]
KLEIN5410_SPARE_BYTES = c_char * 32
class c_gsfKlein5410BssSpecific(Structure):
_fields_ = [
("data_source", c_int),
("side", c_int),
("model_number", c_int),
("acoustic_frequency", c_double),
("sampling_frequency", c_double),
("ping_number", c_uint),
("num_samples", c_uint),
("num_raa_samples", c_uint),
("error_flags", c_uint),
("range", c_uint),
("fish_depth", c_double),
("fish_altitude", c_double),
("sound_speed", c_double),
("tx_waveform", c_int),
("altimeter", c_int),
("raw_data_config", c_uint),
("spare", KLEIN5410_SPARE_BYTES),
]
DELTAT_FILE_TYPE = c_char * 4
DELTAT_SPARE = c_char * 32
class c_gsfDeltaTSpecific(Structure):
_fields_ = [
("decode_file_type", DELTAT_FILE_TYPE),
("version", c_char),
("ping_byte_size", c_int),
("interrogation_time", timespec.c_timespec),
("samples_per_beam", c_int),
("sector_size", c_double),
("start_angle", c_double),
("angle_increment", c_double),
("acoustic_range", c_int),
("acoustic_frequency", c_int),
("sound_velocity", c_double),
("range_resolution", c_double),
("profile_tilt_angle", c_double),
("repetition_rate", c_double),
("ping_number", c_ulong),
("intensity_flag", c_ubyte),
("ping_latency", c_double),
("data_latency", c_double),
("sample_rate_flag", c_ubyte),
("option_flags", c_ubyte),
("num_pings_avg", c_int),
("center_ping_time_offset", c_double),
("user_defined_byte", c_ubyte),
("altitude", c_double),
("external_sensor_flags", c_char),
("pulse_length", c_double),
("fore_aft_beamwidth", c_double),
("athwartships_beamwidth", c_double),
("spare", DELTAT_SPARE),
]
EM12_SPARE = c_char * 32
class c_gsfEM12Specific(Structure):
_fields_ = [
("ping_number", c_int),
("resolution", c_int),
("ping_quality", c_int),
("sound_velocity", c_double),
("mode", c_int),
("spare", EM12_SPARE),
]
R2SONIC_MODELNO = c_ubyte * 12
R2SONIC_SERIALNO = c_ubyte * 12
R2SONIC_INFO = c_double * 12
R2SONIC_SPARE = c_ubyte * 32
class c_gsfR2SonicSpecific(Structure):
_fields_ = [
("model_number", R2SONIC_MODELNO),
("serial_number", R2SONIC_SERIALNO),
("dg_time", timespec.c_timespec),
("ping_number", c_uint),
("ping_period", c_double),
("sound_speed", c_double),
("frequency", c_double),
("tx_power", c_double),
("tx_pulse_width", c_double),
("tx_beamwidth_vert", c_double),
("tx_beamwidth_horiz", c_double),
("tx_steering_vert", c_double),
("tx_steering_horiz", c_double),
("tx_misc_info", c_uint),
("rx_bandwidth", c_double),
("rx_sample_rate", c_double),
("rx_range", c_double),
("rx_gain", c_double),
("rx_spreading", c_double),
("rx_absorption", c_double),
("rx_mount_tilt", c_double),
("rx_misc_info", c_uint),
("reserved", c_ushort),
("num_beams", c_ushort),
("A0_more_info", R2SONIC_INFO),
("A2_more_info", R2SONIC_INFO),
("G0_depth_gate_min", c_double),
("G0_depth_gate_max", c_double),
("G0_depth_gate_slope", c_double),
("spare", R2SONIC_SPARE),
]
SBECHOTRAC_SPARE = c_char * 4
class c_gsfSBEchotracSpecific(Structure):
_fields_ = [
("navigation_error", c_int),
("mpp_source", c_ushort),
("tide_source", c_ushort),
("dynamic_draft", c_double),
("spare", SBECHOTRAC_SPARE),
]
SBMGD77_SPARE = c_char * 4
class c_gsfSBMGD77Specific(Structure):
_fields_ = [
("time_zone_corr", c_ushort),
("position_type_code", c_ushort),
("correction_code", c_ushort),
("bathy_type_code", c_ushort),
("quality_code", c_ushort),
("travel_time", c_double),
("spare", SBMGD77_SPARE),
]
SBBDB_SPARE = c_char * 4
class c_gsfSBBDBSpecific(Structure):
_fields_ = [
("doc_no", c_int),
("eval", c_char),
("classification", c_char),
("track_adj_flag", c_char),
("source_flag", c_char),
("pt_or_track_ln", c_char),
("datum_flag", c_char),
("spare", c_char),
]
SBNOSHDB_SPARE = c_char * 4
class c_gsfSBNOSHDBSpecific(Structure):
_fields_ = [
("type_code", c_ushort),
("carto_code", c_ushort),
("spare", SBNOSHDB_SPARE),
]
SBNAVISOUND_SPARE = c_char * 8
class c_gsfSBNavisoundSpecific(Structure):
_fields_ = [
("pulse_length", c_double),
("spare", SBNAVISOUND_SPARE),
]
KMALL_TX_SECTOR_SPARE_BYTES = c_ubyte * 20
class c_gsfKMALLTxSector(Structure):
_fields_ = [
("txSectorNumb", c_int),
("txArrNumber", c_int),
("txSubArray", c_int),
("sectorTransmitDelay_sec", c_double),
("tiltAngleReTx_deg", c_double),
("txNominalSourceLevel_dB", c_double),
("txFocusRange_m", c_double),
("centreFreq_Hz", c_double),
("signalBandWidth_Hz", c_double),
("totalSignalLength_sec", c_double),
("pulseShading", c_int),
("signalWaveForm", c_int),
("spare1", KMALL_TX_SECTOR_SPARE_BYTES)
]
KMALL_EXTRA_DET_SPARE_BYTES = c_ubyte * 32
class c_gsfKMALLExtraDetClass(Structure):
_fields_ = [
("numExtraDetInClass", c_int),
("alarmFlag", c_int),
("spare", KMALL_EXTRA_DET_SPARE_BYTES)
]
# Sensor specific data structures for the Kongsberg 2040 / SIS 5.0 */
KMALL_SPARE_BYTES_1 = c_ubyte * 8
KMALL_SPARE_BYTES_2 = c_ubyte * 16
KMALL_SPARE_BYTES_3 = c_ubyte * 32
KMALL_SPARE_BYTES_4 = c_ubyte * 32
KMALL_SPARE_BYTES_5 = c_ubyte * 32
KMALL_SECTOR = c_gsfKMALLTxSector * 9
KMALL_EXTRA_DET_CLASS_INFO = c_gsfKMALLExtraDetClass * 11
class c_gsfKMALLSpecific(Structure):
_fields_ = [
("gsfKMALLVersion", c_int),
("dgmType", c_int),
("dgmVersion", c_int),
("systemID", c_int),
("echoSounderID", c_int),
("spare1", KMALL_SPARE_BYTES_1),
("numBytesCmnPart", c_int),
("pingCnt", c_int),
("rxFansPerPing", c_int),
("rxFanIndex", c_int),
("swathsPerPing", c_int),
("swathAlongPosition", c_int),
("txTransducerInd", c_int),
("rxTransducerInd", c_int),
("numRxTransducers", c_int),
("algorithmType", c_int),
("spare2", KMALL_SPARE_BYTES_2),
("numBytesInfoData", c_int),
("pingRate_Hz", c_double),
("beamSpacing", c_int),
("depthMode", c_int),
("subDepthMode", c_int),
("distanceBtwSwath", c_int),
("detectionMode", c_int),
("pulseForm", c_int),
("frequencyMode_Hz", c_double),
("freqRangeLowLim_Hz", c_double),
("freqRangeHighLim_Hz", c_double),
("maxTotalTxPulseLength_sec", c_double),
("maxEffTxPulseLength_sec", c_double),
("maxEffTxBandWidth_Hz", c_double),
("absCoeff_dBPerkm", c_double),
("portSectorEdge_deg", c_double),
("starbSectorEdge_deg", c_double),
("portMeanCov_deg", c_double),
("starbMeanCov_deg", c_double),
("portMeanCov_m", c_double),
("starbMeanCov_m", c_double),
("modeAndStabilisation", c_int),
("runtimeFilter1", c_int),
("runtimeFilter2", c_int),
("pipeTrackingStatus", c_int),
("transmitArraySizeUsed_deg", c_double),
("receiveArraySizeUsed_deg", c_double),
("transmitPower_dB", c_double),
("SLrampUpTimeRemaining", c_int),
("yawAngle_deg", c_double),
("numTxSectors", c_int),
("numBytesPerTxSector", c_int),
("headingVessel_deg", c_double),
("soundSpeedAtTxDepth_mPerSec", c_double),
("txTransducerDepth_m", c_double),
("z_waterLevelReRefPoint_m", c_double),
("x_kmallToall_m", c_double),
("y_kmallToall_m", c_double),
("latLongInfo", c_int),
("posSensorStatus", c_int),
("attitudeSensorStatus", c_int),
("latitude_deg", c_double),
("longitude_deg", c_double),
("ellipsoidHeightReRefPoint_m", c_double),
("spare3", KMALL_SPARE_BYTES_3),
("sector", KMALL_SECTOR),
("numBytesRxInfo", c_int),
("numSoundingsMaxMain", c_int),
("numSoundingsValidMain", c_int),
("numBytesPerSounding", c_int),
("WCSampleRate", c_double),
("seabedImageSampleRate", c_double),
("BSnormal_dB", c_double),
("BSoblique_dB", c_double),
("extraDetectionAlarmFlag", c_int),
("numExtraDetections", c_int),
("numExtraDetectionClasses", c_int),
("numBytesPerClass", c_int),
("spare4", KMALL_SPARE_BYTES_4),
("extraDetClassInfo", KMALL_EXTRA_DET_CLASS_INFO),
("spare5", KMALL_SPARE_BYTES_5)
]
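# Union of all sensor-specific record layouts defined above; the member that applies
# to a given ping is selected according to the sonar type of the ping record.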
class c_gsfSensorSpecific(Union):
_fields_ = [
("gsfSeaBeamSpecific", c_gsfSeaBeamSpecific),
("gsfEM100Specific", c_gsfEM100Specific),
("gsfEM121ASpecific", c_gsfEM121ASpecific),
("gsfEM121Specific", c_gsfEM121ASpecific),
("gsfSeaBatSpecific", c_gsfSeaBatSpecific),
("gsfEM950Specific", c_gsfEM950Specific),
("gsfEM1000Specific", c_gsfEM950Specific),
("gsfSeamapSpecific", c_gsfSeamapSpecific),
("gsfTypeIIISeaBeamSpecific", c_gsfTypeIIISpecific),
("gsfSASSSpecific", c_gsfTypeIIISpecific),
("gsfCmpSassSpecific", c_gsfCmpSassSpecific),
("gsfSBAmpSpecific", c_gsfSBAmpSpecific),
("gsfSeaBatIISpecific", c_gsfSeaBatIISpecific),
("gsfSeaBat8101Specific", c_gsfSeaBat8101Specific),
("gsfSeaBeam2112Specific", c_gsfSeaBeam2112Specific),
("gsfElacMkIISpecific", c_gsfElacMkIISpecific),
# used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS
("gsfEM3Specific", c_gsfEM3Specific),
# used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS
# with raw range and beam angle
("gsfEM3RawSpecific", c_gsfEM3RawSpecific),
("gsfReson8100Specific", c_gsfReson8100Specific),
("gsfReson7100Specific", c_gsfReson7100Specific),
# used for T50 and T20
("gsfResonTSeriesSpecific", c_gsfResonTSeriesSpecific),
# used for EM710, EM302, EM122, and EM2040
("gsfEM4Specific", c_gsfEM4Specific),
# DHG 2006/09/27 Use for GeoSwath+ interferometer
("gsfGeoSwathPlusSpecific", c_gsfGeoSwathPlusSpecific),
# Use for Klein 5410 Bathy Sidescan
("gsfKlein5410BssSpecific", c_gsfKlein5410BssSpecific),
("gsfDeltaTSpecific", c_gsfDeltaTSpecific),
("gsfEM12Specific", c_gsfEM12Specific),
("gsfR2SonicSpecific", c_gsfR2SonicSpecific),
("gsfKMallSpecific", c_gsfKMALLSpecific),
("gsfSBEchotracSpecific", c_gsfSBEchotracSpecific),
("gsfSBBathy2000Specific", c_gsfSBEchotracSpecific),
("gsfSBMGD77Specific", c_gsfSBMGD77Specific),
("gsfSBBDBSpecific", c_gsfSBBDBSpecific),
("gsfSBNOSHDBSpecific", c_gsfSBNOSHDBSpecific),
("gsfSBPDDSpecific", c_gsfSBEchotracSpecific),
("gsfSBNavisoundSpecific", c_gsfSBNavisoundSpecific),
]
| 30.908791 | 76 | 0.598606 | ["MIT"] | UKHO/gsfpy | gsfpy3_09/gsfSensorSpecific.py | 28,127 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Aptitude technologie and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestPriceConfigurator(unittest.TestCase):
pass
| 20.727273 | 59 | 0.785088 | ["MIT"] | Aptitudetech/shei | shei/shei/doctype/price_configurator/test_price_configurator.py | 228 | Python |
n = input("My Name is Delight Kurian Chandy")
print(n)
| 13.5 | 43 | 0.722222 | ["MIT"] | Delightkc/fosslab | script.py | 54 | Python |