# GitAngel/django: django/db/models/aggregates.py (bsd-3-clause)
"""
Classes to represent the definitions of aggregate functions.
"""
from django.core.exceptions import FieldError
from django.db.models.expressions import Func, Value
from django.db.models.fields import FloatField, IntegerField
__all__ = [
    'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]


class Aggregate(Func):
    contains_aggregate = True
    name = None

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Aggregates are not allowed in UPDATE queries, so ignore for_save
        c = super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
        if not summarize:
            expressions = c.get_source_expressions()
            for index, expr in enumerate(expressions):
                if expr.contains_aggregate:
                    before_resolved = self.get_source_expressions()[index]
                    name = before_resolved.name if hasattr(before_resolved, 'name') else repr(before_resolved)
                    raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (c.name, name, name))
        c._patch_aggregate(query)  # backward-compatibility support
        return c

    @property
    def input_field(self):
        return self.source_expressions[0]

    @property
    def default_alias(self):
        expressions = self.get_source_expressions()
        if len(expressions) == 1 and hasattr(expressions[0], 'name'):
            return '%s__%s' % (expressions[0].name, self.name.lower())
        raise TypeError("Complex expressions require an alias")

    def get_group_by_cols(self):
        return []

    def _patch_aggregate(self, query):
        """
        Helper method for patching 3rd party aggregates that do not yet support
        the new way of subclassing. This method will be removed in Django 1.10.

        add_to_query(query, alias, col, source, is_summary) will be defined on
        legacy aggregates which, in turn, instantiates the SQL implementation of
        the aggregate. In all the cases found, the general implementation of
        add_to_query looks like:

        def add_to_query(self, query, alias, col, source, is_summary):
            klass = SQLImplementationAggregate
            aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
            query.aggregates[alias] = aggregate

        By supplying a known alias, we can get the SQLAggregate out of the
        aggregates dict, and use the sql_function and sql_template attributes
        to patch *this* aggregate.
        """
        if not hasattr(self, 'add_to_query') or self.function is not None:
            return

        placeholder_alias = "_XXXXXXXX_"
        self.add_to_query(query, placeholder_alias, None, None, None)
        sql_aggregate = query.aggregates.pop(placeholder_alias)
        if 'sql_function' not in self.extra and hasattr(sql_aggregate, 'sql_function'):
            self.extra['function'] = sql_aggregate.sql_function

        if hasattr(sql_aggregate, 'sql_template'):
            self.extra['template'] = sql_aggregate.sql_template


class Avg(Aggregate):
    function = 'AVG'
    name = 'Avg'

    def __init__(self, expression, **extra):
        output_field = extra.pop('output_field', FloatField())
        super(Avg, self).__init__(expression, output_field=output_field, **extra)

    def as_oracle(self, compiler, connection):
        if self.output_field.get_internal_type() == 'DurationField':
            expression = self.get_source_expressions()[0]
            from django.db.backends.oracle.functions import IntervalToSeconds, SecondsToInterval
            return compiler.compile(
                SecondsToInterval(Avg(IntervalToSeconds(expression)))
            )
        return super(Avg, self).as_sql(compiler, connection)


class Count(Aggregate):
    function = 'COUNT'
    name = 'Count'
    template = '%(function)s(%(distinct)s%(expressions)s)'

    def __init__(self, expression, distinct=False, **extra):
        if expression == '*':
            expression = Value(expression)
        super(Count, self).__init__(
            expression, distinct='DISTINCT ' if distinct else '', output_field=IntegerField(), **extra)

    def __repr__(self):
        return "{}({}, distinct={})".format(
            self.__class__.__name__,
            self.arg_joiner.join(str(arg) for arg in self.source_expressions),
            'False' if self.extra['distinct'] == '' else 'True',
        )

    def convert_value(self, value, expression, connection, context):
        if value is None:
            return 0
        return int(value)


class Max(Aggregate):
    function = 'MAX'
    name = 'Max'


class Min(Aggregate):
    function = 'MIN'
    name = 'Min'


class StdDev(Aggregate):
    name = 'StdDev'

    def __init__(self, expression, sample=False, **extra):
        self.function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
        super(StdDev, self).__init__(expression, output_field=FloatField(), **extra)

    def __repr__(self):
        return "{}({}, sample={})".format(
            self.__class__.__name__,
            self.arg_joiner.join(str(arg) for arg in self.source_expressions),
            'False' if self.function == 'STDDEV_POP' else 'True',
        )

    def convert_value(self, value, expression, connection, context):
        if value is None:
            return value
        return float(value)


class Sum(Aggregate):
    function = 'SUM'
    name = 'Sum'

    def as_oracle(self, compiler, connection):
        if self.output_field.get_internal_type() == 'DurationField':
            expression = self.get_source_expressions()[0]
            from django.db.backends.oracle.functions import IntervalToSeconds, SecondsToInterval
            return compiler.compile(
                SecondsToInterval(Sum(IntervalToSeconds(expression)))
            )
        return super(Sum, self).as_sql(compiler, connection)


class Variance(Aggregate):
    name = 'Variance'

    def __init__(self, expression, sample=False, **extra):
        self.function = 'VAR_SAMP' if sample else 'VAR_POP'
        super(Variance, self).__init__(expression, output_field=FloatField(), **extra)

    def __repr__(self):
        return "{}({}, sample={})".format(
            self.__class__.__name__,
            self.arg_joiner.join(str(arg) for arg in self.source_expressions),
            'False' if self.function == 'VAR_POP' else 'True',
        )

    def convert_value(self, value, expression, connection, context):
        if value is None:
            return value
        return float(value)
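

# Illustrative usage (added for exposition; not part of the module above),
# assuming a hypothetical ``Book`` model with ``price`` and ``rating`` fields:
#
#   from django.db.models import Avg, Count, StdDev
#
#   Book.objects.aggregate(Avg('price'))
#   # -> {'price__avg': 34.35}   (the key comes from Aggregate.default_alias)
#   Book.objects.aggregate(num_books=Count('id', distinct=True))
#   # -> {'num_books': 2452}     (distinct=True prepends 'DISTINCT ' in the SQL)
#   Book.objects.aggregate(StdDev('rating', sample=True))
#   # uses STDDEV_SAMP instead of STDDEV_POP, per StdDev.__init__ above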

# andmaj/unrank-bottleneck-bench: sdsl_linear/gtest-1.6.0/scripts/pump.py (gpl-3.0)
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
]


class Cursor:
  """Represents a position (line and column) in a text file."""

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return self.line == rhs.line and self.column == rhs.column

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    return self.line < rhs.line or (
        self.line == rhs.line and self.column < rhs.column)

  def __le__(self, rhs):
    return self < rhs or self == rhs

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return rhs <= self

  def __str__(self):
    if self == Eof():
      return 'EOF'
    else:
      return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""

    return Cursor(self.line, self.column)


# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  return Cursor(-1, -1)


class Token:
  """Represents a token in a Pump source file."""

  def __init__(self, start=None, end=None, value=None, token_type=None):
    if start is None:
      self.start = Eof()
    else:
      self.start = start
    if end is None:
      self.end = Eof()
    else:
      self.end = end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self."""

    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)


def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""

  return lines[pos.line][pos.column:].startswith(string)


def FindFirstInLine(line, token_table):
  best_match_start = -1
  for (regex, token_type) in token_table:
    m = regex.search(line)
    if m:
      # We found regex in lines
      if best_match_start < 0 or m.start() < best_match_start:
        best_match_start = m.start()
        best_match_length = m.end() - m.start()
        best_match_token_type = token_type

  if best_match_start < 0:
    return None

  return (best_match_start, best_match_length, best_match_token_type)


def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any string in strings in lines."""

  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    if cur_line_number == start.line:
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      if cur_line_number == start.line:
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None


def SubString(lines, start, end):
  """Returns a substring in lines."""

  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))

  if start >= end:
    return ''

  if start.line == end.line:
    return lines[start.line][start.column:end.column]

  result_lines = ([lines[start.line][start.column:]] +
                  lines[start.line + 1:end.line] +
                  [lines[end.line][:end.column]])
  return ''.join(result_lines)


def StripMetaComments(str):
  """Strip meta comments from each line in the given string."""

  # First, completely remove lines containing nothing but a meta
  # comment, including the trailing \n.
  str = re.sub(r'^\s*\$\$.*\n', '', str)

  # Then, remove meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', str)


def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token."""

  return Token(start, end, SubString(lines, start, end), token_type)


def ParseToken(lines, pos, regex, token_type):
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    print 'ERROR: %s expected at %s.' % (token_type, pos)
    sys.exit(1)


ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')


def Skip(lines, pos, regex):
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m and not m.start():
    return pos + m.end()
  else:
    return pos


def SkipUntil(lines, pos, regex, token_type):
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)


def ParseExpTokenInParens(lines, pos):
  def ParseInParens(pos):
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')


def RStripNewLineFromToken(token):
  if token.value.endswith('\n'):
    return Token(token.start, token.end, token.value[:-1], token.token_type)
  else:
    return token


def TokenizeLines(lines, pos):
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end


def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s != '':
    lines = s.splitlines(True)
    for token in TokenizeLines(lines, Cursor(0, 0)):
      yield token


class CodeNode:
  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list


class VarNode:
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code


class RangeNode:
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2


class ForNode:
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code


class ElseNode:
  def __init__(self, else_branch=None):
    self.else_branch = else_branch


class IfNode:
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch


class RawCodeNode:
  def __init__(self, token=None):
    self.raw_code = token


class LiteralDollarNode:
  def __init__(self, token):
    self.token = token


class ExpNode:
  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp


def PopFront(a_list):
  head = a_list[0]
  a_list[:1] = []
  return head


def PushFront(a_list, elem):
  a_list[:0] = [elem]


def PopToken(a_list, token_type=None):
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    print 'ERROR: %s expected at %s' % (token_type, token.start)
    print 'ERROR: %s found instead' % (token,)
    sys.exit(1)

  return token


def PeekToken(a_list):
  if not a_list:
    return None

  return a_list[0]


def ParseExpNode(token):
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)


def ParseElseNode(tokens):
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None


def ParseAtomicCodeNode(tokens):
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    PushFront(tokens, head)
    return None


def ParseCodeNode(tokens):
  atomic_code_list = []
  while True:
    if not tokens:
      break
    atomic_code_node = ParseAtomicCodeNode(tokens)
    if atomic_code_node:
      atomic_code_list.append(atomic_code_node)
    else:
      break
  return CodeNode(atomic_code_list)


def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  tokens = list(Tokenize(pump_src_text))
  code_node = ParseCodeNode(tokens)
  return code_node


class Env:
  def __init__(self):
    self.variables = []
    self.ranges = []

  def Clone(self):
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    self.ranges[:1] = []

  def GetValue(self, identifier):
    for (var, value) in self.variables:
      if identifier == var:
        return value

    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)

  def EvalExp(self, exp):
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)

    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)


class Output:
  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    index = self.string.rfind('\n')
    if index < 0:
      return ''

    return self.string[index + 1:]

  def Append(self, s):
    self.string += s


def RunAtomicCode(env, node, output):
  if isinstance(node, VarNode):
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    print 'BAD'
    print node
    sys.exit(1)


def RunCode(env, code_node, output):
  for atomic_code in code_node.atomic_code:
    RunAtomicCode(env, atomic_code, output)


def IsComment(cur_line):
  return '//' in cur_line


def IsInPreprocessorDirevative(prev_lines, cur_line):
  if cur_line.lstrip().startswith('#'):
    return True
  return prev_lines != [] and prev_lines[-1].endswith('\\')


def WrapComment(line, output):
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    indent = loc
  else:
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())


def WrapCode(line, line_concat, output):
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len

    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()

  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())


def WrapPreprocessorDirevative(line, output):
  WrapCode(line, ' \\', output)


def WrapPlainCode(line, output):
  WrapCode(line, '', output)


def IsHeaderGuardOrInclude(line):
  return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
          re.match(r'^#include\s', line))


def WrapLongLine(line, output):
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
  elif IsComment(line):
    if IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard lines
      # and includes.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirevative(output, line):
    if IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard lines
      # and includes.
      output.append(line)
    else:
      WrapPreprocessorDirevative(line, output)
  else:
    WrapPlainCode(line, output)


def BeautifyCode(string):
  lines = string.splitlines()
  output = []
  for line in lines:
    WrapLongLine(line, output)

  output2 = [line.rstrip() for line in output]
  return '\n'.join(output2) + '\n'


def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  ast = ParseToAST(StripMetaComments(src_text))
  output = Output()
  RunCode(Env(), ast, output)
  return BeautifyCode(output.string)


def main(argv):
  if len(argv) == 1:
    print __doc__
    sys.exit(1)

  file_path = argv[-1]
  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('// %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()


if __name__ == '__main__':
  main(sys.argv)

# tonicbupt/flask-navigator: tests/test_navbar.py (mit)
from pytest import fixture, raises
from flask.ext.navigation.navbar import NavigationBar
from flask.ext.navigation.item import Item, ItemReference
@fixture
def navbar():
    navbar = NavigationBar('mybar', [
        Item(u'Home', 'home'),
        Item(u'News', 'news'),
    ])
    return navbar


def test_attrs(navbar):
    assert navbar.name == 'mybar'
    assert len(navbar.items) == 2


def test_iterable(navbar):
    iterable = iter(navbar)

    item_1st = next(iterable)
    assert item_1st.label == u'Home'
    assert item_1st.endpoint == 'home'

    item_2nd = next(iterable)
    assert item_2nd.label == u'News'
    assert item_2nd.endpoint == 'news'

    with raises(StopIteration):
        next(iterable)

    item_reentry = next(iter(navbar))  # test for reentry iterable
    assert item_reentry.label == u'Home'
    assert item_reentry.endpoint == 'home'


def test_initializer(navbar):
    @navbar.initializer
    def initialize_more_items(nav):
        return nav

    assert navbar.initializers[0] is initialize_more_items


def test_alias_item():
    navbar = NavigationBar('mybar', [
        Item(u'Home', 'home'),
        Item(u'News', 'news', args={'page': 1}),
    ], alias={
        'foo': ItemReference('home'),
        'bar': ItemReference('news', {'page': 1}),
        'egg': ItemReference('news', {'page': 2}),
    })

    assert navbar.alias_item('foo').label == u'Home'
    assert navbar.alias_item('bar').label == u'News'

    with raises(KeyError):
        navbar.alias_item('egg')
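
# Illustrative usage (added for exposition; not part of the original test
# suite), summarizing the API the tests above exercise:
#
#   bar = NavigationBar('top', [Item(u'Home', 'home'), Item(u'News', 'news')])
#   [item.label for item in bar]   # [u'Home', u'News'], per test_iterable
#   bar.alias_item('foo')          # resolves only when alias={'foo': ItemReference('home')}
#                                  # was passed to the constructor; otherwise KeyError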

# ResearchSoftwareInstitute/MyHPOM: hs_file_types/models/base.py (bsd-3-clause)
import os
import copy
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.models import model_to_dict
from django.contrib.postgres.fields import HStoreField, ArrayField
from dominate.tags import div, legend, table, tr, tbody, thead, td, th, \
    span, a, form, button, label, textarea, h4, input, ul, li, p
from lxml import etree

from hs_core.hydroshare.utils import get_resource_file_name_and_extension, current_site_url
from hs_core.models import ResourceFile, AbstractMetaDataElement, Coverage, CoreMetaData


class AbstractFileMetaData(models.Model):
    """ base class for MyHPOM file type metadata """

    # one temporal coverage and one spatial coverage
    coverages = GenericRelation(Coverage)
    # key/value metadata
    extra_metadata = HStoreField(default={})
    # keywords
    keywords = ArrayField(models.CharField(max_length=100, null=True, blank=True), default=[])
    # to track if any metadata element has been modified to trigger file update
    is_dirty = models.BooleanField(default=False)

    class Meta:
        abstract = True

    @classmethod
    def get_metadata_model_classes(cls):
        return {'coverage': Coverage}

    def get_metadata_elements(self):
        """returns a list of all metadata elements (instances of AbstractMetaDataElement)
        associated with this file type metadata object.
        """
        return list(self.coverages.all())

    def delete_all_elements(self):
        self.coverages.all().delete()
        self.extra_metadata = {}
        self.keywords = []
        self.save()

    def get_html(self, include_extra_metadata=True, **kwargs):
        """Generates html for displaying all metadata elements associated with this logical file.
        Subclass must override to include additional html for additional metadata it supports.
        :param include_extra_metadata: a flag to control if necessary html for displaying key/value
        metadata will be included
        """
        root_div = div()
        if self.logical_file.dataset_name:
            root_div.add(self.get_dataset_name_html())
        if self.keywords:
            root_div.add(self.get_keywords_html())
        if self.extra_metadata and include_extra_metadata:
            root_div.add(self.get_key_value_metadata_html())
        return root_div.render()

    def get_dataset_name_html(self):
        """generates html for viewing dataset name (title)"""
        if self.logical_file.dataset_name:
            dataset_name_div = div(cls="col-xs-12 content-block")
            with dataset_name_div:
                legend("Title")
                p(self.logical_file.dataset_name)
            return dataset_name_div

    def get_keywords_html(self):
        """generates html for viewing keywords"""
        keywords_div = div()
        if self.keywords:
            keywords_div = div(cls="col-sm-12 content-block")
            with keywords_div:
                legend('Keywords')
                with div(cls="tags"):
                    with ul(id="list-keywords-file-type", cls="tag-list custom-well"):
                        for kw in self.keywords:
                            with li():
                                a(kw, cls="tag")
        return keywords_div

    def get_key_value_metadata_html(self):
        """generates html for viewing key/value extra metadata"""
        extra_metadata_div = div()
        if self.extra_metadata:
            extra_metadata_div = div(cls="col-sm-12 content-block")
            with extra_metadata_div:
                legend('Extended Metadata')
                with table(cls="hs-table table dataTable no-footer", style="width: 100%"):
                    with thead():
                        with tr(cls="header-row"):
                            th("Key")
                            th("Value")
                    with tbody():
                        for k, v in self.extra_metadata.iteritems():
                            with tr(data_key=k):
                                td(k)
                                td(v)
        return extra_metadata_div

    def get_html_forms(self, dataset_name_form=True, temporal_coverage=True, **kwargs):
        """generates html forms for all the metadata elements associated with this logical file
        type
        :param dataset_name_form: If True then a form for editing dataset_name (title) attribute is
        included
        :param temporal_coverage: if True then form elements for editing temporal coverage are
        included
        """
        root_div = div()

        with root_div:
            if dataset_name_form:
                self.get_dataset_name_form()
            self.get_keywords_html_form()
            self.get_extra_metadata_html_form()
            if temporal_coverage:
                self.get_temporal_coverage_html_form()

        return root_div

    def get_keywords_html_form(self):
        keywords_div = div(cls="col-sm-12 content-block", id="filetype-keywords")
        action = "/hydroshare/hsapi/_internal/{0}/{1}/add-file-keyword-metadata/"
        action = action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        delete_action = "/hydroshare/hsapi/_internal/{0}/{1}/delete-file-keyword-metadata/"
        delete_action = delete_action.format(self.logical_file.__class__.__name__,
                                             self.logical_file.id)
        with keywords_div:
            legend("Keywords")
            with form(id="id-keywords-filetype", action=action, method="post",
                      enctype="multipart/form-data"):
                input(id="id-delete-keyword-filetype-action", type="hidden",
                      value=delete_action)
                with div(cls="tags"):
                    with div(id="add-keyword-wrapper", cls="input-group"):
                        input(id="txt-keyword-filetype", cls="form-control",
                              placeholder="keyword",
                              type="text", name="keywords")
                        with span(cls="input-group-btn"):
                            a("Add", id="btn-add-keyword-filetype", cls="btn btn-success",
                              type="button")
                    with ul(id="lst-tags-filetype", cls="custom-well tag-list"):
                        for kw in self.keywords:
                            with li(cls="tag"):
                                span(kw)
                                with a():
                                    span(cls="glyphicon glyphicon-remove-circle icon-remove")
            p("Duplicate. Keywords not added.", id="id-keywords-filetype-msg",
              cls="text-danger small", style="display: none;")

    def get_spatial_coverage_form(self, allow_edit=False):
        return Coverage.get_spatial_html_form(resource=None, element=self.spatial_coverage,
                                              allow_edit=allow_edit, file_type=True)

    def get_temporal_coverage_form(self, allow_edit=True):
        return Coverage.get_temporal_html_form(resource=None, element=self.temporal_coverage,
                                               file_type=True, allow_edit=allow_edit)

    def get_extra_metadata_html_form(self):
        def get_add_keyvalue_button():
            add_key_value_btn = a(cls="btn btn-success", type="button", data_toggle="modal",
                                  data_target="#add-keyvalue-filetype-modal",
                                  style="margin-bottom:20px;")
            with add_key_value_btn:
                with span(cls="glyphicon glyphicon-plus"):
                    span("Add Key/Value", cls="button-label")
            return add_key_value_btn

        if self.extra_metadata:
            root_div_extra = div(cls="col-xs-12", id="filetype-extra-metadata")
            with root_div_extra:
                legend('Extended Metadata')
                get_add_keyvalue_button()
                with table(cls="hs-table table dataTable no-footer",
                           style="width: 100%"):
                    with thead():
                        with tr(cls="header-row"):
                            th("Key")
                            th("Value")
                            th("Edit/Remove")
                    with tbody():
                        counter = 0
                        for k, v in self.extra_metadata.iteritems():
                            counter += 1
                            with tr(data_key=k):
                                td(k)
                                td(v)
                                with td():
                                    span(data_toggle="modal", data_placement="auto", title="Edit",
                                         cls="btn-edit-icon glyphicon glyphicon-pencil "
                                             "icon-blue table-icon",
                                         data_target="#edit-keyvalue-filetype-modal"
                                                     "-{}".format(counter))
                                    span(data_toggle="modal", data_placement="auto",
                                         title="Remove",
                                         cls="btn-remove-icon glyphicon glyphicon-trash "
                                             "btn-remove table-icon",
                                         data_target="#delete-keyvalue-filetype-modal"
                                                     "-{}".format(counter))
                self._get_add_key_value_modal_form()
                self._get_edit_key_value_modal_forms()
                self._get_delete_key_value_modal_forms()
            return root_div_extra
        else:
            root_div_extra = div(id="filetype-extra-metadata", cls="col-xs-12 content-block")
            with root_div_extra:
                legend('Extended Metadata')
                get_add_keyvalue_button()
                self._get_add_key_value_modal_form()
            return root_div_extra

    def get_temporal_coverage_html_form(self):
        # Note: When using this form layout the context variable 'temp_form' must be
        # set prior to calling the template.render(context)
        root_div = div(cls="col-lg-6 col-xs-12", id="temporal-coverage-filetype")
        with root_div:
            with form(id="id-coverage_temporal-file-type", action="{{ temp_form.action }}",
                      method="post", enctype="multipart/form-data"):
                div("{% crispy temp_form %}")
                with div(cls="row", style="margin-top:10px;"):
                    with div(cls="col-md-offset-10 col-xs-offset-6 "
                                 "col-md-2 col-xs-6"):
                        button("Save changes", type="button",
                               cls="btn btn-primary pull-right",
                               style="display: none;")
        return root_div

    def has_all_required_elements(self):
        return True

    @classmethod
    def get_supported_element_names(cls):
        return ['Coverage']

    def get_required_missing_elements(self):
        return []

    @property
    def has_metadata(self):
        if not self.coverages.all() and not self.extra_metadata \
                and not self.logical_file.dataset_name:
            return False
        return True

    @property
    def spatial_coverage(self):
        return self.coverages.exclude(type='period').first()

    @property
    def temporal_coverage(self):
        return self.coverages.filter(type='period').first()

    def add_to_xml_container(self, container):
        """Generates xml+rdf representation of all the metadata elements associated with this
        logical file type instance. Subclass must override this if it has additional metadata
        elements."""

        NAMESPACES = CoreMetaData.NAMESPACES
        dataset_container = etree.SubElement(
            container, '{%s}Dataset' % NAMESPACES['hsterms'])
        rdf_Description = etree.SubElement(dataset_container, '{%s}Description' % NAMESPACES['rdf'])
        dc_datatype = etree.SubElement(rdf_Description, '{%s}type' % NAMESPACES['dc'])
        data_type = current_site_url() + "/terms/" + self.logical_file.data_type
        dc_datatype.set('{%s}resource' % NAMESPACES['rdf'], data_type)

        if self.logical_file.dataset_name:
            dc_datatitle = etree.SubElement(rdf_Description, '{%s}title' % NAMESPACES['dc'])
            dc_datatitle.text = self.logical_file.dataset_name

        # add fileType node
        for res_file in self.logical_file.files.all():
            hsterms_datafile = etree.SubElement(rdf_Description,
                                                '{%s}dataFile' % NAMESPACES['hsterms'])
            rdf_dataFile_Description = etree.SubElement(hsterms_datafile,
                                                        '{%s}Description' % NAMESPACES['rdf'])
            file_uri = u'{hs_url}/resource/{res_id}/data/contents/{file_name}'.format(
                hs_url=current_site_url(),
                res_id=self.logical_file.resource.short_id,
                file_name=res_file.short_path)
            rdf_dataFile_Description.set('{%s}about' % NAMESPACES['rdf'], file_uri)
            dc_title = etree.SubElement(rdf_dataFile_Description,
                                        '{%s}title' % NAMESPACES['dc'])
            file_name = get_resource_file_name_and_extension(res_file)[1]
            dc_title.text = file_name
            dc_format = etree.SubElement(rdf_dataFile_Description, '{%s}format' % NAMESPACES['dc'])
            dc_format.text = res_file.mime_type

        self.add_keywords_to_xml_container(rdf_Description)
        self.add_extra_metadata_to_xml_container(rdf_Description)
        for coverage in self.coverages.all():
            coverage.add_to_xml_container(rdf_Description)
        return rdf_Description

    def add_extra_metadata_to_xml_container(self, container):
        """Generates xml+rdf representation of all the key/value metadata associated
        with an instance of the logical file type"""

        for key, value in self.extra_metadata.iteritems():
            hsterms_key_value = etree.SubElement(
                container, '{%s}extendedMetadata' % CoreMetaData.NAMESPACES['hsterms'])
            hsterms_key_value_rdf_Description = etree.SubElement(
                hsterms_key_value, '{%s}Description' % CoreMetaData.NAMESPACES['rdf'])
            hsterms_key = etree.SubElement(hsterms_key_value_rdf_Description,
                                           '{%s}key' % CoreMetaData.NAMESPACES['hsterms'])
            hsterms_key.text = key
            hsterms_value = etree.SubElement(hsterms_key_value_rdf_Description,
                                             '{%s}value' % CoreMetaData.NAMESPACES['hsterms'])
            hsterms_value.text = value

    def add_keywords_to_xml_container(self, container):
        """Generates xml+rdf representation of all the keywords associated
        with an instance of the logical file type"""

        for kw in self.keywords:
            dc_subject = etree.SubElement(container, '{%s}subject' % CoreMetaData.NAMESPACES['dc'])
            dc_subject.text = kw

    def create_element(self, element_model_name, **kwargs):
        # had to import here to avoid circular import
        from hs_file_types.utils import update_resource_coverage_element

        model_type = self._get_metadata_element_model_type(element_model_name)
        kwargs['content_object'] = self
        element = model_type.model_class().create(**kwargs)
        if element_model_name.lower() == "coverage":
            resource = element.metadata.logical_file.resource
            # resource will be None in case of coverage element being
            # created as part of copying a resource that supports logical file
            # types
            if resource is not None:
                update_resource_coverage_element(resource)
        return element

    def update_element(self, element_model_name, element_id, **kwargs):
        # had to import here to avoid circular import
        from hs_file_types.utils import update_resource_coverage_element

        model_type = self._get_metadata_element_model_type(element_model_name)
        kwargs['content_object'] = self
        model_type.model_class().update(element_id, **kwargs)
        self.is_dirty = True
        self.save()
        if element_model_name.lower() == "coverage":
            element = model_type.model_class().objects.get(id=element_id)
            resource = element.metadata.logical_file.resource
            update_resource_coverage_element(resource)

    def delete_element(self, element_model_name, element_id):
        model_type = self._get_metadata_element_model_type(element_model_name)
        model_type.model_class().remove(element_id)
        self.is_dirty = True
        self.save()
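
    # Illustrative calls (added for exposition; not in the original file),
    # assuming ``lf`` is an instance of a concrete logical file type and the
    # coverage value dicts follow hs_core's Coverage element conventions:
    #
    #   cov = lf.metadata.create_element(
    #       'coverage', type='period',
    #       value={'start': '1/1/2010', 'end': '12/12/2015'})
    #   lf.metadata.update_element('coverage', cov.id, type='period',
    #                              value={'start': '1/1/2012', 'end': '12/12/2015'})
    #   lf.metadata.delete_element('coverage', cov.id)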
    def _get_metadata_element_model_type(self, element_model_name):
        element_model_name = element_model_name.lower()
        if not self._is_valid_element(element_model_name):
            raise ValidationError("Metadata element type:%s is not one of the "
                                  "supported metadata elements for %s."
                                  % (element_model_name, type(self)))

        unsupported_element_error = "Metadata element type:%s is not supported." \
                                    % element_model_name
        try:
            model_type = ContentType.objects.get(app_label=self.model_app_label,
                                                 model=element_model_name)
        except ObjectDoesNotExist:
            try:
                model_type = ContentType.objects.get(app_label='hs_core',
                                                     model=element_model_name)
            except ObjectDoesNotExist:
                raise ValidationError(unsupported_element_error)

        if not issubclass(model_type.model_class(), AbstractMetaDataElement):
            raise ValidationError(unsupported_element_error)

        return model_type

    def _is_valid_element(self, element_name):
        allowed_elements = [el.lower() for el in self.get_supported_element_names()]
        return element_name.lower() in allowed_elements

    @classmethod
    def validate_element_data(cls, request, element_name):
        """Subclass must implement this function to validate data for the
        specified metadata element (element_name)"""
        raise NotImplementedError

    def get_dataset_name_form(self):
        form_action = "/hydroshare/hsapi/_internal/{0}/{1}/update-filetype-dataset-name/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        root_div = div(cls="col-xs-12")
        dataset_name = self.logical_file.dataset_name if self.logical_file.dataset_name else ""
        with root_div:
            with form(action=form_action, id="filetype-dataset-name",
                      method="post", enctype="multipart/form-data"):
                div("{% csrf_token %}")
                with div(cls="form-group"):
                    with div(cls="control-group"):
                        legend('Title')
                        with div(cls="controls"):
                            input(value=dataset_name,
                                  cls="form-control input-sm textinput textInput",
                                  id="file_dataset_name", maxlength="250",
                                  name="dataset_name", type="text")
                with div(cls="row", style="margin-top:10px;"):
                    with div(cls="col-md-offset-10 col-xs-offset-6 col-md-2 col-xs-6"):
                        button("Save changes", cls="btn btn-primary pull-right btn-form-submit",
                               style="display: none;", type="button")
        return root_div

    def _get_add_key_value_modal_form(self):
        form_action = "/hydroshare/hsapi/_internal/{0}/{1}/update-file-keyvalue-metadata/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        modal_div = div(cls="modal fade", id="add-keyvalue-filetype-modal", tabindex="-1",
                        role="dialog", aria_labelledby="add-key-value-metadata",
                        aria_hidden="true")
        with modal_div:
            with div(cls="modal-dialog", role="document"):
                with div(cls="modal-content"):
                    with form(action=form_action, id="add-keyvalue-filetype-metadata",
                              method="post", enctype="multipart/form-data"):
                        div("{% csrf_token %}")
                        with div(cls="modal-header"):
                            button("x", type="button", cls="close", data_dismiss="modal",
                                   aria_hidden="true")
                            h4("Add Key/Value Metadata", cls="modal-title",
                               id="add-key-value-metadata")
                        with div(cls="modal-body"):
                            with div(cls="form-group"):
                                with div(cls="control-group"):
                                    label("Key", cls="control-label requiredField",
                                          fr="file_extra_meta_name")
                                    with div(cls="controls"):
                                        input(cls="form-control input-sm textinput textInput",
                                              id="file_extra_meta_name", maxlength="100",
                                              name="name", type="text")
                                with div(cls="control-group"):
                                    label("Value", cls="control-label requiredField",
                                          fr="file_extra_meta_value")
                                    with div(cls="controls"):
                                        textarea(cls="form-control input-sm textarea",
                                                 cols="40", rows="10",
                                                 id="file_extra_meta_value",
                                                 style="resize: vertical;",
                                                 name="value", type="text")
                        with div(cls="modal-footer"):
                            button("Cancel", type="button", cls="btn btn-default",
                                   data_dismiss="modal")
                            button("OK", type="button", cls="btn btn-primary",
                                   id="btn-confirm-add-metadata")  # TODO: TESTING
        return modal_div

    def _get_edit_key_value_modal_forms(self):
        # TODO: See if can use one modal dialog to edit any pair of key/value
        form_action = "/hydroshare/hsapi/_internal/{0}/{1}/update-file-keyvalue-metadata/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        counter = 0
        root_div = div(id="edit-keyvalue-filetype-modals")
        with root_div:
            for k, v in self.extra_metadata.iteritems():
                counter += 1
                modal_div = div(cls="modal fade",
                                id="edit-keyvalue-filetype-modal-{}".format(counter),
                                tabindex="-1",
                                role="dialog", aria_labelledby="edit-key-value-metadata",
                                aria_hidden="true")
                with modal_div:
                    with div(cls="modal-dialog", role="document"):
                        with div(cls="modal-content"):
                            form_id = "edit-keyvalue-filetype-metadata-{}".format(counter)
                            with form(action=form_action,
                                      id=form_id, data_counter="{}".format(counter),
                                      method="post", enctype="multipart/form-data"):
                                div("{% csrf_token %}")
                                with div(cls="modal-header"):
                                    button("x", type="button", cls="close", data_dismiss="modal",
                                           aria_hidden="true")
                                    h4("Update Key/Value Metadata", cls="modal-title",
                                       id="edit-key-value-metadata")
                                with div(cls="modal-body"):
                                    with div(cls="form-group"):
                                        with div(cls="control-group"):
                                            label("Key(Original)",
                                                  cls="control-label requiredField",
                                                  fr="file_extra_meta_key_original")
                                            with div(cls="controls"):
                                                input(value=k, readonly="readonly",
                                                      cls="form-control input-sm textinput "
                                                          "textInput",
                                                      id="file_extra_meta_key_original",
                                                      maxlength="100",
                                                      name="key_original", type="text")
                                        with div(cls="control-group"):
                                            label("Key", cls="control-label requiredField",
                                                  fr="file_extra_meta_key")
                                            with div(cls="controls"):
                                                input(value=k,
                                                      cls="form-control input-sm textinput "
                                                          "textInput",
                                                      id="file_extra_meta_key", maxlength="100",
                                                      name="key", type="text")
                                        with div(cls="control-group"):
                                            label("Value", cls="control-label requiredField",
                                                  fr="file_extra_meta_value")
                                            with div(cls="controls"):
                                                textarea(v,
                                                         cls="form-control input-sm textarea",
                                                         cols="40", rows="10",
                                                         id="file_extra_meta_value",
                                                         style="resize: vertical;",
                                                         name="value", type="text")
                                with div(cls="modal-footer"):
                                    button("Cancel", type="button", cls="btn btn-default",
                                           data_dismiss="modal")
                                    button("OK", id="btn-confirm-edit-key-value",
                                           type="button", cls="btn btn-primary")
        return root_div

    def _get_delete_key_value_modal_forms(self):
        form_action = "/hydroshare/hsapi/_internal/{0}/{1}/delete-file-keyvalue-metadata/"
        form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
        counter = 0
        root_div = div(id="delete-keyvalue-filetype-modals")
        with root_div:
            for k, v in self.extra_metadata.iteritems():
                counter += 1
                modal_div = div(cls="modal fade",
                                id="delete-keyvalue-filetype-modal-{}".format(counter),
                                tabindex="-1",
                                role="dialog", aria_labelledby="delete-key-value-metadata",
                                aria_hidden="true")
                with modal_div:
                    with div(cls="modal-dialog", role="document"):
                        with div(cls="modal-content"):
                            form_id = "delete-keyvalue-filetype-metadata-{}".format(counter)
                            with form(action=form_action,
                                      id=form_id,
                                      method="post", enctype="multipart/form-data"):
                                div("{% csrf_token %}")
                                with div(cls="modal-header"):
                                    button("x", type="button", cls="close", data_dismiss="modal",
                                           aria_hidden="true")
                                    h4("Confirm to Delete Key/Value Metadata", cls="modal-title",
                                       id="delete-key-value-metadata")
                                with div(cls="modal-body"):
                                    with div(cls="form-group"):
                                        with div(cls="control-group"):
                                            label("Key", cls="control-label requiredField",
                                                  fr="file_extra_meta_name")
                                            with div(cls="controls"):
                                                input(cls="form-control input-sm textinput "
                                                          "textInput", value=k,
                                                      id="file_extra_meta_key", maxlength="100",
                                                      name="key", type="text", readonly="readonly")
                                        with div(cls="control-group"):
                                            label("Value", cls="control-label requiredField",
                                                  fr="file_extra_meta_value")
                                            with div(cls="controls"):
                                                textarea(v, cls="form-control input-sm textarea",
                                                         cols="40", rows="10",
                                                         id="file_extra_meta_value",
                                                         style="resize: vertical;",
                                                         name="value", type="text",
                                                         readonly="readonly")
                                with div(cls="modal-footer"):
                                    button("Cancel", type="button", cls="btn btn-default",
                                           data_dismiss="modal")
                                    button("Delete", type="button", cls="btn btn-danger",
                                           id="btn-delete-key-value")  # TODO: TESTING
        return root_div


class AbstractLogicalFile(models.Model):
    """ base class for MyHPOM file types """

    # files associated with this logical file group
    files = GenericRelation(ResourceFile, content_type_field='logical_file_content_type',
                            object_id_field='logical_file_object_id')
    # the dataset name will allow us to identify a logical file group on user interface
    dataset_name = models.CharField(max_length=255, null=True, blank=True)
    # this will be used for dc:type in resourcemetadata.xml
    # each specific logical type needs to reset this field
    # also this data type needs to be defined in the terms.html page
    data_type = "Generic"

    class Meta:
        abstract = True

    @classmethod
    def get_allowed_uploaded_file_types(cls):
        # any file can be part of this logical file group - subclass needs to override this
        return [".*"]

    @classmethod
    def get_allowed_storage_file_types(cls):
        # can store any file types in this logical file group - subclass needs to override this
        return [".*"]

    @classmethod
    def type_name(cls):
        return cls.__name__

    @property
    def has_metadata(self):
        return hasattr(self, 'metadata')

    @property
    def size(self):
        # get total size (in bytes) of all files in this file type
        return sum([f.size for f in self.files.all()])

    @property
    def resource(self):
        res_file = self.files.all().first()
        if res_file is not None:
            return res_file.resource
        else:
            return None

    @property
    def supports_resource_file_move(self):
        """allows a resource file that is part of this logical file type to be moved"""
        return True

    @property
    def supports_resource_file_add(self):
        """allows a resource file to be added"""
        return True

    @property
    def supports_resource_file_rename(self):
        """allows a resource file that is part of this logical file type to be renamed"""
        return True

    @property
    def supports_zip(self):
        """allows a folder containing resource file(s) that are part of this logical file type
        to be zipped"""
        return True

    @property
    def supports_delete_folder_on_zip(self):
        """allows the original folder to be deleted upon zipping of that folder"""
        return True

    @property
    def supports_unzip(self):
        """allows a zip file that is part of this logical file type to get unzipped"""
        return True

    def add_resource_file(self, res_file):
        """Makes a ResourceFile (res_file) object part of this logical file object. If res_file
        is already associated with any other logical file object, this function does not do
        anything to that logical object. The caller needs to take necessary action for the
        previously associated logical file object. If res_file is already part of this
        logical file, it raises ValidationError.
        :param res_file: an instance of ResourceFile
        """
        if res_file in self.files.all():
            raise ValidationError("Resource file is already part of this logical file.")

        res_file.logical_file_content_object = self
        res_file.save()

    # TODO: unit test this
    def reset_to_generic(self, user):
        """
        This sets all files in this logical file group to GenericLogicalFile type
        :param user: user who is re-setting to generic file type
        :return:
        """
        from .generic import GenericLogicalFile

        for res_file in self.files.all():
            if res_file.has_logical_file:
                res_file.logical_file.logical_delete(user=user, delete_res_files=False)
            logical_file = GenericLogicalFile.create()
            res_file.logical_file_content_object = logical_file
            res_file.save()

    def get_copy(self):
        """creates a copy of this logical file object with associated metadata needed to support
        resource copy.
        Note: This copied logical file however does not have any association with resource files
        """
        copy_of_logical_file = type(self).create()
        copy_of_logical_file.dataset_name = self.dataset_name
        copy_of_logical_file.metadata.extra_metadata = copy.deepcopy(self.metadata.extra_metadata)
        copy_of_logical_file.metadata.keywords = self.metadata.keywords
        copy_of_logical_file.metadata.save()
        copy_of_logical_file.save()

        # copy the metadata elements
        elements_to_copy = self.metadata.get_metadata_elements()
        for element in elements_to_copy:
            element_args = model_to_dict(element)
            element_args.pop('content_type')
            element_args.pop('id')
            element_args.pop('object_id')
            copy_of_logical_file.metadata.create_element(element.term, **element_args)

        return copy_of_logical_file

    @classmethod
    def compute_file_type_folder(cls, resource, file_folder, file_name):
        """
        Computes the new folder path where the file type files will be stored
        :param resource: an instance of BaseResource
        :param file_folder: current file folder of the file which is being set to a specific file
        type
        :param file_name: name of the file (without extension) which is being set to a specific
        file type
        :return: computed new folder path
        """
        current_folder_path = 'data/contents'
        if file_folder is not None:
            current_folder_path = os.path.join(current_folder_path, file_folder)

        new_folder_path = os.path.join(current_folder_path, file_name)

        # To avoid folder creation failure when there is already a matching
        # directory path, first check that the folder does not exist.
        # If the folder path exists then change the folder name by adding a
        # number to the end
        istorage = resource.get_irods_storage()
        counter = 0
        while istorage.exists(os.path.join(resource.short_id, new_folder_path)):
            new_file_name = file_name + "_{}".format(counter)
            new_folder_path = os.path.join(current_folder_path, new_file_name)
            counter += 1
        return new_folder_path

    def logical_delete(self, user, delete_res_files=True):
        """
        Deletes the logical file as well as all resource files associated with this logical file.
        This function is primarily used by the system to delete the logical file object and
        associated metadata as part of deleting a resource file object. Any time a request is made
        to delete a specific resource file object, if the requested file is part of a
        logical file then all files in the same logical file group will be deleted. If custom logic
        requires deleting a logical file object (LFO) then instead of using LFO.delete(), you must
        use LFO.logical_delete()
        :param user: user who is deleting the file
        :param delete_res_files: If True all resource files that are part of this logical file will
        be deleted
        """
        from hs_core.hydroshare.resource import delete_resource_file

        # delete all resource files associated with this instance of logical file
        if delete_res_files:
            for f in self.files.all():
                delete_resource_file(f.resource.short_id, f.id, user,
                                     delete_logical_file=False)

        # delete logical file first then delete the associated metadata file object
        # deleting the logical file object will not automatically delete the associated
        # metadata file object
        metadata = self.metadata if self.has_metadata else None
        super(AbstractLogicalFile, self).delete()
        if metadata is not None:
            # this should also delete all metadata elements that have generic relations with
            # the metadata object
            metadata.delete()
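

# Illustrative sketch (added for exposition; not part of this module): the
# minimal shape of a concrete file type built on the two abstract bases above.
# The names CsvFileMetaData/CsvLogicalFile are hypothetical; real subclasses
# such as GenericLogicalFile live in sibling modules of hs_file_types.models.
class CsvFileMetaData(AbstractFileMetaData):
    # app label searched first by _get_metadata_element_model_type() lookups
    model_app_label = 'hs_file_types'


class CsvLogicalFile(AbstractLogicalFile):
    metadata = models.OneToOneField(CsvFileMetaData, related_name="logical_file")
    data_type = "CSV data"

    @classmethod
    def get_allowed_uploaded_file_types(cls):
        # only files with a .csv extension may be set to this type
        return [".csv"]

    @classmethod
    def create(cls):
        # factory relied upon by get_copy() and reset_to_generic() above
        csv_metadata = CsvFileMetaData.objects.create()
        return cls.objects.create(metadata=csv_metadata)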

# forrestv/pyderiv: deriv.py (GPL-3.0, per the license header below)
# This file is part of pyderiv. http://forre.st/pyderiv
#
# pyderiv is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyderiv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyderiv. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import math
import itertools
import _deriv
class Variable(object):
def __init__(self, v):
self.v = v
def __add__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._add(self.v, other))
__radd__ = __add__
def __sub__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._sub(self.v, other))
def __rsub__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._sub(other, self.v))
def __mul__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._mul(self.v, other))
__rmul__ = __mul__
def __div__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._div(self.v, other))
__truediv__ = __div__
def __rdiv__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._div(other, self.v))
__rtruediv__ = __rdiv__
def __pow__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._pow(self.v, other))
def __rpow__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._pow(other, self.v))
def __repr__(self):
return "Variable(%r)" % (self.v,)
id_generator = itertools.count()
def varying(value, d_count=1, noise=False):
assert d_count >= 0
if d_count == 0:
return value
o = id_generator.next()
if noise:
o = ~o
v = (value, {o: 1})
p = v
for i in xrange(d_count - 1):
p[1][o] = (p[1][o], {o: 0})
p = p[1][o]
return Variable(v)
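# Example (illustrative): varying(3.0) returns a Variable wrapping
# (3.0, {id: 1}), i.e. the value paired with derivative 1 keyed by a fresh id.
# With d_count=2 the derivative entry becomes (1, {id: 0}) so that second
# derivatives can be propagated as well.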
def sin(self):
if isinstance(self, Variable):
return Variable(_deriv._sin(self.v))
else:
return math.sin(self)
def cos(self):
if isinstance(self, Variable):
return Variable(_deriv._cos(self.v))
else:
return math.cos(self)
def log(self):
if isinstance(self, Variable):
return Variable(_deriv.log(self.v))
else:
return math.log(self)
def v(self):
if not isinstance(self, Variable):
return self
return _deriv._v(self.v)
def d(self, *others):
if not isinstance(self, Variable):
return 0
for other in others:
assert isinstance(other, Variable)
assert len(other.v[1]) == 1
self = Variable(_deriv._d(self.v).get(other.v[1].keys()[0], 0))
return self
def get_matrix(output, input, n=1):
return [[v(d(y, *(x,)*n)) for x in input] for y in output]
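# Note: d(y, *(x,)*n) differentiates n times with respect to the same
# variable, so for n=2 get_matrix holds the pure second derivatives
# d^2(y_i)/d(x_j)^2, not the mixed Hessian terms.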
def matrix_wrapper(n):
def b(f):
def a(args, *extra_args, **extra_kwargs):
args2 = [varying(x) for x in args]
result2 = f(args2, *extra_args, **extra_kwargs)
result = result2[0]
return (([v(x) for x in result], get_matrix(result, args2, n)),) + tuple(result2[1:])
return a
return b
jacobian_decorator = matrix_wrapper(1)
hessian_decorator = matrix_wrapper(2)
if __name__ == "__main__":
w, x, y, z = [varying(n) for n in [3, 4, 5, 6]]
#q = w / x + y * z + 1 / w
q = \
(w + 4) + (4 + w) + (w + x) + (x + w) + \
(w - 4) + (4 - w) + (w - x) + (x - w) + \
(w * 4) + (4 * w) + (w * x) + (x * w) + \
(w / 4) + (4 / w) + (w / x) + (x / w) + \
(w **4) + (4 **w) + (w **x) + (x **w) + \
0
#q = w ** x
print q
print "q", v(q)
print "dq/dw", v(d(q, w))
print "dq/dx", v(d(q, x))
print "dq/dy", v(d(q, y))
print "dq/dz", v(d(q, z))
print "ddq/dw", v(d(q, w, w))
print "ddq/dx", v(d(q, x, x))
print "ddq/dy", v(d(q, y, y))
print "ddq/dz", v(d(q, z, z))
def f((a, b, c)):
return ((a + b + c, a * b * c, a * b + c, a + b * c, a * a + b * b + c * c), 5)
j = jacobian_decorator(f)
h = hessian_decorator(f)
print f((1., 2., 3.))
print j((1., 2., 3.))
print h((1., 2., 3.))
| gpl-3.0 | -2,566,797,785,603,388,400 | 29.054645 | 97 | 0.559636 | false |
David-Amaro/bank-payment | __unported__/account_banking/migrations/6.1.0.1.81/post-set-statement-line-state.py | 14 | 1439 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Therp BV (<http://therp.nl>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" r81: introduction of bank statement line state
"""
__name__ = ("account.bank.statement.line:: set new field 'state' to "
"confirmed for all statement lines belonging to confirmed "
"statements")
def migrate(cr, version):
cr.execute("UPDATE account_bank_statement_line as sl "
" SET state = 'confirmed'"
" FROM account_bank_statement as s "
" WHERE sl.statement_id = s.id "
" AND s.state = 'confirm' "
)
| agpl-3.0 | 533,808,001,725,223,360 | 41.323529 | 78 | 0.578874 | false |
chrta/canfestival-3-ct | objdictgen/eds_utils.py | 2 | 40678 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#This file is part of CanFestival, a library implementing CanOpen Stack.
#
#Copyright (C): Edouard TISSERANT, Francis DUPIN and Laurent BESSARD
#
#See COPYING file for copyrights details.
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import node
from node import nosub, var, array, rec, plurivar, pluriarray, plurirec
try:
set
except NameError:
from sets import Set as set
from types import *
from time import *
import os,re
# Regular expression for finding index section names
index_model = re.compile('([0-9A-F]{1,4}$)')
# Regular expression for finding subindex section names
subindex_model = re.compile('([0-9A-F]{1,4})SUB([0-9A-F]{1,2}$)')
# Regular expression for finding object links section names
index_objectlinks_model = re.compile('([0-9A-F]{1,4}OBJECTLINKS$)')
# Regular expression for finding NodeXPresent keynames
nodepresent_model = re.compile('NODE([0-9]{1,3})PRESENT$')
# Regular expression for finding NodeXName keynames
nodename_model = re.compile('NODE([0-9]{1,3})NAME$')
# Regular expression for finding NodeXDCFName keynames
nodedcfname_model = re.compile('NODE([0-9]{1,3})DCFNAME$')
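# Examples (illustrative) of what these regexps match:
# index_model: "1018" -> index 0x1018
# subindex_model: "1018SUB3" -> index 0x1018, subindex 0x3
# nodepresent_model: "NODE2PRESENT" -> node id 2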
# Dictionary to quickly translate a boolean into an integer value
BOOL_TRANSLATE = {True : "1", False : "0"}
# Dictionary to quickly translate an eds access value into a canfestival access value
ACCESS_TRANSLATE = {"RO" : "ro", "WO" : "wo", "RW" : "rw", "RWR" : "rw", "RWW" : "rw", "CONST" : "ro"}
# Function for verifying data values
is_integer = lambda x: type(x) in (IntType, LongType)
is_string = lambda x: type(x) in (StringType, UnicodeType)
is_boolean = lambda x: x in (0, 1)
# Define checking of value for each attribute
ENTRY_ATTRIBUTES = {"SUBNUMBER" : is_integer,
"PARAMETERNAME" : is_string,
"OBJECTTYPE" : lambda x: x in (2, 7, 8, 9),
"DATATYPE" : is_integer,
"LOWLIMIT" : is_integer,
"HIGHLIMIT" : is_integer,
"ACCESSTYPE" : lambda x: x.upper() in ACCESS_TRANSLATE.keys(),
"DEFAULTVALUE" : lambda x: True,
"PDOMAPPING" : is_boolean,
"OBJFLAGS" : is_integer,
"PARAMETERVALUE" : lambda x: True,
"UPLOADFILE" : is_string,
"DOWNLOADFILE" : is_string}
# Define entry parameters by entry ObjectType number
ENTRY_TYPES = {2 : {"name" : " DOMAIN",
"require" : ["PARAMETERNAME", "OBJECTTYPE"],
"optional" : ["DATATYPE", "ACCESSTYPE", "DEFAULTVALUE", "OBJFLAGS"]},
7 : {"name" : " VAR",
"require" : ["PARAMETERNAME", "DATATYPE", "ACCESSTYPE"],
"optional" : ["OBJECTTYPE", "DEFAULTVALUE", "PDOMAPPING", "LOWLIMIT", "HIGHLIMIT", "OBJFLAGS", "PARAMETERVALUE"]},
8 : {"name" : "n ARRAY",
"require" : ["PARAMETERNAME", "OBJECTTYPE", "SUBNUMBER"],
"optional" : ["OBJFLAGS"]},
9 : {"name" : " RECORD",
"require" : ["PARAMETERNAME", "OBJECTTYPE", "SUBNUMBER"],
"optional" : ["OBJFLAGS"]}}
# Function that searches the Node Mappings for the information about an index or a subindex
# and returns the default value
def GetDefaultValue(Node, index, subIndex = None):
infos = Node.GetEntryInfos(index)
if infos["struct"] & node.OD_MultipleSubindexes:
# First case entry is a record
if infos["struct"] & node.OD_IdenticalSubindexes:
subentry_infos = Node.GetSubentryInfos(index, 1)
# Second case entry is an array
else:
subentry_infos = Node.GetSubentryInfos(index, subIndex)
# If a default value is defined for this subindex, returns it
if "default" in subentry_infos:
return subentry_infos["default"]
# If not, returns the default value for the subindex type
else:
return Node.GetTypeDefaultValue(subentry_infos["type"])
# Third case entry is a var
else:
subentry_infos = Node.GetSubentryInfos(index, 0)
# If a default value is defined for this subindex, returns it
if "default" in subentry_infos:
return subentry_infos["default"]
# If not, returns the default value for the subindex type
else:
return Node.GetTypeDefaultValue(subentry_infos["type"])
return None
#-------------------------------------------------------------------------------
# Parse file
#-------------------------------------------------------------------------------
# List of section names that are not index and subindex and that we can meet in
# an EDS file
SECTION_KEYNAMES = ["FILEINFO", "DEVICEINFO", "DUMMYUSAGE", "COMMENTS",
"MANDATORYOBJECTS", "OPTIONALOBJECTS", "MANUFACTUREROBJECTS",
"STANDARDDATATYPES", "SUPPORTEDMODULES"]
# Function that extracts sections from a file and returns a list of (section name, lines) tuples
def ExtractSections(file):
return [(blocktuple[0], # EntryName : Assignements dict
blocktuple[-1].splitlines()) # all the lines
for blocktuple in [ # Split the eds files into
block.split("]", 1) # (EntryName,Assignements) tuple
for block in # for each blocks staring with '['
("\n"+file).split("\n[")]
if blocktuple[0].isalnum()] # if EntryName exists
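# Example (illustrative): for a file containing
# [FileInfo]
# FileName=node.eds
# ExtractSections returns [('FileInfo', ['', 'FileName=node.eds'])]
# (the leading '' comes from the newline right after the closing ']')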
# Function that parses a CPJ file and returns a list of the network topologies found
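# Example (illustrative): a minimal CPJ topology section accepted by this parser:
# [TOPOLOGY]
# NetName=TestNet
# Nodes=0x01
# Node1Present=0x01
# Node1Name=node1
# Node1DCFName=node1.eds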
def ParseCPJFile(filepath):
networks = []
# Read file text
cpj_file = open(filepath,'r').read()
sections = ExtractSections(cpj_file)
# Parse assignments for each section
for section_name, assignments in sections:
# Verify that section name is TOPOLOGY
if section_name.upper() in "TOPOLOGY":
# Reset values for topology
topology = {"Name" : "", "Nodes" : {}}
for assignment in assignments:
# Skip any comment line
if assignment.startswith(";"):
pass
# Verify that line is a valid assignment
elif assignment.find('=') > 0:
# Split assignment into the two values keyname and value
keyname, value = assignment.split("=", 1)
# keyname must be immediately followed by the "=" sign, so we
# verify that there is no whitespace in keyname
if keyname.isalnum():
# value can be preceded and followed by whitespaces, so we escape them
value = value.strip()
# First case, value starts with "0x" or "-0x", then it's a hexadecimal value
if value.startswith("0x") or value.startswith("-0x"):
try:
computed_value = int(value, 16)
except:
raise SyntaxError, _("\"%s\" is not a valid value for attribute \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
elif value.isdigit() or value.startswith("-") and value[1:].isdigit():
# Second case, value is a number and starts with "0" or "-0", then it's an octal value
if value.startswith("0") or value.startswith("-0"):
computed_value = int(value, 8)
# Third case, value is a number and doesn't start with "0", then it's a decimal value
else:
computed_value = int(value)
# In any other case, we keep string value
else:
computed_value = value
# Search if the section name match any cpj expression
nodepresent_result = nodepresent_model.match(keyname.upper())
nodename_result = nodename_model.match(keyname.upper())
nodedcfname_result = nodedcfname_model.match(keyname.upper())
if keyname.upper() == "NETNAME":
if not is_string(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
topology["Name"] = computed_value
elif keyname.upper() == "NODES":
if not is_integer(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
topology["Number"] = computed_value
elif keyname.upper() == "EDSBASENAME":
if not is_string(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
topology["Path"] = computed_value
elif nodepresent_result:
if not is_boolean(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
nodeid = int(nodepresent_result.groups()[0])
if nodeid not in topology["Nodes"].keys():
topology["Nodes"][nodeid] = {}
topology["Nodes"][nodeid]["Present"] = computed_value
elif nodename_result:
if not is_string(value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
nodeid = int(nodename_result.groups()[0])
if nodeid not in topology["Nodes"].keys():
topology["Nodes"][nodeid] = {}
topology["Nodes"][nodeid]["Name"] = computed_value
elif nodedcfname_result:
if not is_string(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
nodeid = int(nodedcfname_result.groups()[0])
if nodeid not in topology["Nodes"].keys():
topology["Nodes"][nodeid] = {}
topology["Nodes"][nodeid]["DCFName"] = computed_value
else:
raise SyntaxError, _("Keyname \"%s\" not recognised for section \"[%s]\"")%(keyname, section_name)
# All lines that are not empty and are neither a comment nor a valid assignment
elif assignment.strip() != "":
raise SyntaxError, _("\"%s\" is not a valid CPJ line")%assignment.strip()
if "Number" not in topology.keys():
raise SyntaxError, _("\"Nodes\" keyname in \"[%s]\" section is missing")%section_name
if topology["Number"] != len(topology["Nodes"]):
raise SyntaxError, _("\"Nodes\" value not corresponding to number of nodes defined")
for nodeid, node in topology["Nodes"].items():
if "Present" not in node.keys():
raise SyntaxError, _("\"Node%dPresent\" keyname in \"[%s]\" section is missing")%(nodeid, section_name)
networks.append(topology)
# In other case, there is a syntax problem into CPJ file
else:
raise SyntaxError, _("Section \"[%s]\" is unrecognized")%section_name
return networks
# Function that parses an EDS file and returns a dictionary of the information
def ParseEDSFile(filepath):
eds_dict = {}
# Read file text
eds_file = open(filepath,'r').read()
sections = ExtractSections(eds_file)
# Parse assignments for each section
for section_name, assignments in sections:
# Reset values of entry
values = {}
# Search if the section name match an index or subindex expression
index_result = index_model.match(section_name.upper())
subindex_result = subindex_model.match(section_name.upper())
index_objectlinks_result = index_objectlinks_model.match(section_name.upper())
# Compilation of the EDS information dictionary
is_entry = False
# First case, section name is in SECTION_KEYNAMES
if section_name.upper() in SECTION_KEYNAMES:
# Verify that entry is not already defined
if section_name.upper() not in eds_dict:
eds_dict[section_name.upper()] = values
else:
raise SyntaxError, _("\"[%s]\" section is defined two times")%section_name
# Second case, section name is an index name
elif index_result:
# Extract index number
index = int(index_result.groups()[0], 16)
# If index hasn't been referenced before, we add an entry into the dictionary
if index not in eds_dict:
eds_dict[index] = values
eds_dict[index]["subindexes"] = {}
elif eds_dict[index].keys() == ["subindexes"]:
values["subindexes"] = eds_dict[index]["subindexes"]
eds_dict[index] = values
else:
raise SyntaxError, _("\"[%s]\" section is defined two times")%section_name
is_entry = True
# Third case, section name is a subindex name
elif subindex_result:
# Extract index and subindex number
index, subindex = [int(value, 16) for value in subindex_result.groups()]
# If index hasn't been referenced before, we add an entry into the dictionary
# that will be updated later
if index not in eds_dict:
eds_dict[index] = {"subindexes" : {}}
if subindex not in eds_dict[index]["subindexes"]:
eds_dict[index]["subindexes"][subindex] = values
else:
raise SyntaxError, _("\"[%s]\" section is defined two times")%section_name
is_entry = True
# Fourth case, section name is an object links section name
elif index_objectlinks_result:
pass
# In any other case, there is a syntax problem into EDS file
else:
raise SyntaxError, _("Section \"[%s]\" is unrecognized")%section_name
for assignment in assignments:
# Skip any comment line
if assignment.startswith(";"):
pass
# Verify that line is a valid assignment
elif assignment.find('=') > 0:
# Split assignment into the two values keyname and value
keyname, value = assignment.split("=", 1)
# keyname must be immediately followed by the "=" sign, so we
# verify that there is no whitespace in keyname
if keyname.isalnum():
# value can be preceded and followed by whitespaces, so we escape them
value = value.strip().replace(" ", "")
# First case, value starts with "$NODEID", then it's a formula
if value.upper().startswith("$NODEID"):
try:
test = int(value.upper().replace("$NODEID+", ""), 16)
computed_value = "\"%s\""%value
except:
raise SyntaxError, _("\"%s\" is not a valid formula for attribute \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
# Second case, value starts with "0x", then it's a hexadecimal value
elif value.startswith("0x") or value.startswith("-0x"):
try:
computed_value = int(value, 16)
except:
raise SyntaxError, _("\"%s\" is not a valid value for attribute \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
elif value.isdigit() or value.startswith("-") and value[1:].isdigit():
# Third case, value is a number and starts with "0", then it's an octal value
if value.startswith("0") or value.startswith("-0"):
computed_value = int(value, 8)
# Fourth case, value is a number and doesn't start with "0", then it's a decimal value
else:
computed_value = int(value)
# In any other case, we keep string value
else:
computed_value = value
# Add value to values dictionary
if computed_value != "":
# If entry is an index or a subindex
if is_entry:
# Verify that keyname is a possible attribute
if keyname.upper() not in ENTRY_ATTRIBUTES:
raise SyntaxError, _("Keyname \"%s\" not recognised for section \"[%s]\"")%(keyname, section_name)
# Verify that value is valid
elif not ENTRY_ATTRIBUTES[keyname.upper()](computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
else:
values[keyname.upper()] = computed_value
else:
values[keyname.upper()] = computed_value
# All lines that are not empty and are neither a comment nor a valid assignment
elif assignment.strip() != "":
raise SyntaxError, _("\"%s\" is not a valid EDS line")%assignment.strip()
# If entry is an index or a subindex
if is_entry:
# Verify that entry has an ObjectType
values["OBJECTTYPE"] = values.get("OBJECTTYPE", 7)
# Extract parameters defined
keys = set(values.keys())
keys.discard("subindexes")
# Extract possible parameters and parameters required
possible = set(ENTRY_TYPES[values["OBJECTTYPE"]]["require"] +
ENTRY_TYPES[values["OBJECTTYPE"]]["optional"])
required = set(ENTRY_TYPES[values["OBJECTTYPE"]]["require"])
# Verify that parameters defined contains all the parameters required
if not keys.issuperset(required):
missing = list(required.difference(keys))
if len(missing) > 1:
attributes = _("Attributes %s are")%_(", ").join(["\"%s\""%attribute for attribute in missing])
else:
attributes = _("Attribute \"%s\" is")%missing[0]
raise SyntaxError, _("Error on section \"[%s]\":\n%s required for a %s entry")%(section_name, attributes, ENTRY_TYPES[values["OBJECTTYPE"]]["name"])
# Verify that parameters defined are all in the possible parameters
if not keys.issubset(possible):
unsupported = list(keys.difference(possible))
if len(unsupported) > 1:
attributes = _("Attributes %s are")%_(", ").join(["\"%s\""%attribute for attribute in unsupported])
else:
attributes = _("Attribute \"%s\" is")%unsupported[0]
raise SyntaxError, _("Error on section \"[%s]\":\n%s unsupported for a %s entry")%(section_name, attributes, ENTRY_TYPES[values["OBJECTTYPE"]]["name"])
VerifyValue(values, section_name, "ParameterValue")
VerifyValue(values, section_name, "DefaultValue")
return eds_dict
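# CANopen data type codes checked below (per CiA 301): 0x01 BOOLEAN,
# 0x08 REAL32, 0x09 VISIBLE_STRING, 0x0A OCTET_STRING, 0x0B UNICODE_STRING,
# 0x0F DOMAIN, 0x11 REAL64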
def VerifyValue(values, section_name, param):
if param.upper() in values:
try:
if values["DATATYPE"] in (0x09, 0x0A, 0x0B, 0x0F):
values[param.upper()] = str(values[param.upper()])
elif values["DATATYPE"] in (0x08, 0x11):
values[param.upper()] = float(values[param.upper()])
elif values["DATATYPE"] == 0x01:
values[param.upper()] = {0 : False, 1 : True}[values[param.upper()]]
else:
if not isinstance(values[param.upper()], (IntType, LongType)) and values[param.upper()].upper().find("$NODEID") == -1:
raise
except:
raise SyntaxError, _("Error on section \"[%s]\":\n%s incompatible with DataType")%(section_name, param)
# Function that write an EDS file after generate it's content
def WriteFile(filepath, content):
# Open file in write mode
cfile = open(filepath,"w")
# Write content
cfile.write(content)
# Close file
cfile.close()
# Function that generate the EDS file content for the current node in the manager
def GenerateFileContent(Node, filepath):
# Dictionary of each index contents
indexContents = {}
# Extract local time
current_time = localtime()
# Extract node information
nodename = Node.GetNodeName()
nodeid = Node.GetNodeID()
nodetype = Node.GetNodeType()
description = Node.GetNodeDescription()
# Retrieving lists of indexes defined
entries = Node.GetIndexes()
# Generate FileInfo section
fileContent = "[FileInfo]\n"
fileContent += "FileName=%s\n"%os.path.split(filepath)[-1]
fileContent += "FileVersion=1\n"
fileContent += "FileRevision=1\n"
fileContent += "EDSVersion=4.0\n"
fileContent += "Description=%s\n"%description
fileContent += "CreationTime=%s"%strftime("%I:%M", current_time)
# %p option of strftime doesn't seem to work, so generate AM/PM by hand
if strftime("%I", current_time) == strftime("%H", current_time):
fileContent += "AM\n"
else:
fileContent += "PM\n"
fileContent += "CreationDate=%s\n"%strftime("%m-%d-%Y", current_time)
fileContent += "CreatedBy=CANFestival\n"
fileContent += "ModificationTime=%s"%strftime("%I:%M", current_time)
# %p option of strftime doesn't seem to work, so generate AM/PM by hand
if strftime("%I", current_time) == strftime("%H", current_time):
fileContent += "AM\n"
else:
fileContent += "PM\n"
fileContent += "ModificationDate=%s\n"%strftime("%m-%d-%Y", current_time)
fileContent += "ModifiedBy=CANFestival\n"
# Generate DeviceInfo section
fileContent += "\n[DeviceInfo]\n"
fileContent += "VendorName=CANFestival\n"
# Use information typed by user in Identity entry
fileContent += "VendorNumber=0x%8.8X\n"%Node.GetEntry(0x1018, 1)
fileContent += "ProductName=%s\n"%nodename
fileContent += "ProductNumber=0x%8.8X\n"%Node.GetEntry(0x1018, 2)
fileContent += "RevisionNumber=0x%8.8X\n"%Node.GetEntry(0x1018, 3)
# CANFestival supports all baudrates as long as the chosen driver supports them
fileContent += "BaudRate_10=1\n"
fileContent += "BaudRate_20=1\n"
fileContent += "BaudRate_50=1\n"
fileContent += "BaudRate_125=1\n"
fileContent += "BaudRate_250=1\n"
fileContent += "BaudRate_500=1\n"
fileContent += "BaudRate_800=1\n"
fileContent += "BaudRate_1000=1\n"
# Select BootUp type from the information given by the user
fileContent += "SimpleBootUpMaster=%s\n"%BOOL_TRANSLATE[nodetype == "master"]
fileContent += "SimpleBootUpSlave=%s\n"%BOOL_TRANSLATE[nodetype == "slave"]
# CANFestival characteristics
fileContent += "Granularity=8\n"
fileContent += "DynamicChannelsSupported=0\n"
fileContent += "CompactPDO=0\n"
fileContent += "GroupMessaging=0\n"
# Calculate receive and transmit PDO numbers from the entries available
fileContent += "NrOfRXPDO=%d\n"%len([idx for idx in entries if 0x1400 <= idx <= 0x15FF])
fileContent += "NrOfTXPDO=%d\n"%len([idx for idx in entries if 0x1800 <= idx <= 0x19FF])
# LSS not supported since DS-302 is not fully implemented
fileContent += "LSS_Supported=0\n"
# Generate Dummy Usage section
fileContent += "\n[DummyUsage]\n"
fileContent += "Dummy0001=0\n"
fileContent += "Dummy0002=1\n"
fileContent += "Dummy0003=1\n"
fileContent += "Dummy0004=1\n"
fileContent += "Dummy0005=1\n"
fileContent += "Dummy0006=1\n"
fileContent += "Dummy0007=1\n"
# Generate Comments section
fileContent += "\n[Comments]\n"
fileContent += "Lines=0\n"
# List of entries by type (Mandatory, Optional or Manufacturer)
mandatories = []
optionals = []
manufacturers = []
# Remove all unused PDO
## for entry in entries[:]:
## if 0x1600 <= entry < 0x1800 or 0x1A00 <= entry < 0x1C00:
## subentry_value = Node.GetEntry(entry, 1)
## if subentry_value is None or subentry_value == 0:
## entries.remove(entry)
## entries.remove(entry - 0x200)
# For each entry, we generate the entry section or sections if there is subindexes
for entry in entries:
# Extract infos and values for the entry
entry_infos = Node.GetEntryInfos(entry)
values = Node.GetEntry(entry, compute = False)
# Define section name
text = "\n[%X]\n"%entry
# If there is only one value, it's a VAR entry
if type(values) != ListType:
# Extract the information of the first subindex
subentry_infos = Node.GetSubentryInfos(entry, 0)
# Generate EDS information for the entry
text += "ParameterName=%s\n"%subentry_infos["name"]
text += "ObjectType=0x7\n"
text += "DataType=0x%4.4X\n"%subentry_infos["type"]
text += "AccessType=%s\n"%subentry_infos["access"]
if subentry_infos["type"] == 1:
text += "DefaultValue=%s\n"%BOOL_TRANSLATE[values]
else:
text += "DefaultValue=%s\n"%values
text += "PDOMapping=%s\n"%BOOL_TRANSLATE[subentry_infos["pdo"]]
else:
# Generate EDS information for the entry
text += "ParameterName=%s\n"%entry_infos["name"]
if entry_infos["struct"] & node.OD_IdenticalSubindexes:
text += "ObjectType=0x9\n"
else:
text += "ObjectType=0x8\n"
# Generate EDS information for subindexes of the entry in a separate text
subtext = ""
# Reset number of subindex defined
nb_subentry = 0
for subentry, value in enumerate(values):
# Extract the information of each subindex
subentry_infos = Node.GetSubentryInfos(entry, subentry)
# If entry is not for compatibility, generate information for the subindex
if subentry_infos["name"] != "Compatibility Entry":
subtext += "\n[%Xsub%X]\n"%(entry, subentry)
subtext += "ParameterName=%s\n"%subentry_infos["name"]
subtext += "ObjectType=0x7\n"
subtext += "DataType=0x%4.4X\n"%subentry_infos["type"]
subtext += "AccessType=%s\n"%subentry_infos["access"]
if subentry_infos["type"] == 1:
subtext += "DefaultValue=%s\n"%BOOL_TRANSLATE[value]
else:
subtext += "DefaultValue=%s\n"%value
subtext += "PDOMapping=%s\n"%BOOL_TRANSLATE[subentry_infos["pdo"]]
# Increment number of subindex defined
nb_subentry += 1
# Write number of subindex defined for the entry
text += "SubNumber=%d\n"%nb_subentry
# Write subindex definitions
text += subtext
# Then we add the entry in the right list
# First case, entry is between 0x2000 and 0x5FFF, then it's a manufacturer entry
if 0x2000 <= entry <= 0x5FFF:
manufacturers.append(entry)
# Second case, entry is required, then it's a mandatory entry
elif entry_infos["need"]:
mandatories.append(entry)
# In any other case, it's an optional entry
else:
optionals.append(entry)
# Save text of the entry in the dictionary of contents
indexContents[entry] = text
# Before generating the file content we sort the entry lists
manufacturers.sort()
mandatories.sort()
optionals.sort()
# Generate Definition of mandatory objects
fileContent += "\n[MandatoryObjects]\n"
fileContent += "SupportedObjects=%d\n"%len(mandatories)
for idx, entry in enumerate(mandatories):
fileContent += "%d=0x%4.4X\n"%(idx + 1, entry)
# Write mandatory entries
for entry in mandatories:
fileContent += indexContents[entry]
# Generate Definition of optional objects
fileContent += "\n[OptionalObjects]\n"
fileContent += "SupportedObjects=%d\n"%len(optionals)
for idx, entry in enumerate(optionals):
fileContent += "%d=0x%4.4X\n"%(idx + 1, entry)
# Write optional entries
for entry in optionals:
fileContent += indexContents[entry]
# Generate Definition of manufacturer objects
fileContent += "\n[ManufacturerObjects]\n"
fileContent += "SupportedObjects=%d\n"%len(manufacturers)
for idx, entry in enumerate(manufacturers):
fileContent += "%d=0x%4.4X\n"%(idx + 1, entry)
# Write manufacturer entries
for entry in manufacturers:
fileContent += indexContents[entry]
# Return File Content
return fileContent
# Function that generates EDS file from current node edited
def GenerateEDSFile(filepath, node):
try:
# Generate file content
content = GenerateFileContent(node, filepath)
# Write file
WriteFile(filepath, content)
return None
except ValueError, message:
return _("Unable to generate EDS file\n%s")%message
# Function that generate the CPJ file content for the nodelist
def GenerateCPJContent(nodelist):
nodes = nodelist.SlaveNodes.keys()
nodes.sort()
fileContent = "[TOPOLOGY]\n"
fileContent += "NetName=%s\n"%nodelist.GetNetworkName()
fileContent += "Nodes=0x%2.2X\n"%len(nodes)
for nodeid in nodes:
fileContent += "Node%dPresent=0x01\n"%nodeid
fileContent += "Node%dName=%s\n"%(nodeid, nodelist.SlaveNodes[nodeid]["Name"])
fileContent += "Node%dDCFName=%s\n"%(nodeid, nodelist.SlaveNodes[nodeid]["EDS"])
fileContent += "EDSBaseName=eds\n"
return fileContent
# Function that generates Node from an EDS file
def GenerateNode(filepath, nodeID = 0):
# Create a new node
Node = node.Node(id = nodeID)
try:
# Parse file and extract dictionary of EDS entry
eds_dict = ParseEDSFile(filepath)
# Extract Profile Number from Device Type entry
ProfileNb = eds_dict[0x1000].get("DEFAULTVALUE", 0) & 0x0000ffff
# If profile is not DS-301 or DS-302
if ProfileNb not in [0, 301, 302]:
# Compile Profile name and path to .prf file
ProfileName = "DS-%d"%ProfileNb
ProfilePath = os.path.join(os.path.split(__file__)[0], "config/%s.prf"%ProfileName)
# Verify that profile is available
if os.path.isfile(ProfilePath):
try:
# Load Profile
execfile(ProfilePath)
Node.SetProfileName(ProfileName)
Node.SetProfile(Mapping)
Node.SetSpecificMenu(AddMenuEntries)
except:
pass
# Read all entries in the EDS dictionary
for entry, values in eds_dict.iteritems():
# All sections with a name in keynames are skipped
if entry in SECTION_KEYNAMES:
pass
else:
# Extract information for the entry
entry_infos = Node.GetEntryInfos(entry)
# If no information is available, then we write it
if not entry_infos:
# First case, entry is a DOMAIN or VAR
if values["OBJECTTYPE"] in [2, 7]:
if values["OBJECTTYPE"] == 2:
values["DATATYPE"] = values.get("DATATYPE", 0xF)
if values["DATATYPE"] != 0xF:
raise SyntaxError, _("Domain entry 0x%4.4X DataType must be 0xF(DOMAIN) if defined")%entry
# Add mapping for entry
Node.AddMappingEntry(entry, name = values["PARAMETERNAME"], struct = 1)
# Add mapping for first subindex
Node.AddMappingEntry(entry, 0, values = {"name" : values["PARAMETERNAME"],
"type" : values["DATATYPE"],
"access" : ACCESS_TRANSLATE[values["ACCESSTYPE"].upper()],
"pdo" : values.get("PDOMAPPING", 0) == 1})
# Second case, entry is an ARRAY or RECORD
elif values["OBJECTTYPE"] in [8, 9]:
# Extract maximum subindex number defined
max_subindex = max(values["subindexes"].keys())
# Add mapping for entry
Node.AddMappingEntry(entry, name = values["PARAMETERNAME"], struct = 3)
# Add mapping for first subindex
Node.AddMappingEntry(entry, 0, values = {"name" : "Number of Entries", "type" : 0x05, "access" : "ro", "pdo" : False})
# Add mapping for other subindexes
for subindex in xrange(1, int(max_subindex) + 1):
# if subindex is defined
if subindex in values["subindexes"]:
Node.AddMappingEntry(entry, subindex, values = {"name" : values["subindexes"][subindex]["PARAMETERNAME"],
"type" : values["subindexes"][subindex]["DATATYPE"],
"access" : ACCESS_TRANSLATE[values["subindexes"][subindex]["ACCESSTYPE"].upper()],
"pdo" : values["subindexes"][subindex].get("PDOMAPPING", 0) == 1})
# if not, we add a mapping for compatibility
else:
Node.AddMappingEntry(entry, subindex, values = {"name" : "Compatibility Entry", "type" : 0x05, "access" : "rw", "pdo" : False})
## # Third case, entry is an RECORD
## elif values["OBJECTTYPE"] == 9:
## # Verify that the first subindex is defined
## if 0 not in values["subindexes"]:
## raise SyntaxError, "Error on entry 0x%4.4X:\nSubindex 0 must be defined for a RECORD entry"%entry
## # Add mapping for entry
## Node.AddMappingEntry(entry, name = values["PARAMETERNAME"], struct = 7)
## # Add mapping for first subindex
## Node.AddMappingEntry(entry, 0, values = {"name" : "Number of Entries", "type" : 0x05, "access" : "ro", "pdo" : False})
## # Verify that second subindex is defined
## if 1 in values["subindexes"]:
## Node.AddMappingEntry(entry, 1, values = {"name" : values["PARAMETERNAME"] + " %d[(sub)]",
## "type" : values["subindexes"][1]["DATATYPE"],
## "access" : ACCESS_TRANSLATE[values["subindexes"][1]["ACCESSTYPE"].upper()],
## "pdo" : values["subindexes"][1].get("PDOMAPPING", 0) == 1,
## "nbmax" : 0xFE})
## else:
## raise SyntaxError, "Error on entry 0x%4.4X:\nA RECORD entry must have at least 2 subindexes"%entry
# Define entry for the new node
# First case, entry is a DOMAIN or VAR
if values["OBJECTTYPE"] in [2, 7]:
# Take default value if it is defined
if "PARAMETERVALUE" in values:
value = values["PARAMETERVALUE"]
elif "DEFAULTVALUE" in values:
value = values["DEFAULTVALUE"]
# Find default value for value type of the entry
else:
value = GetDefaultValue(Node, entry)
Node.AddEntry(entry, 0, value)
# Second case, entry is an ARRAY or a RECORD
elif values["OBJECTTYPE"] in [8, 9]:
# Verify that "Subnumber" attribute is defined and has a valid value
if "SUBNUMBER" in values and values["SUBNUMBER"] > 0:
# Extract maximum subindex number defined
max_subindex = max(values["subindexes"].keys())
Node.AddEntry(entry, value = [])
# Define value for all subindexes except the first
for subindex in xrange(1, int(max_subindex) + 1):
# Take default value if it is defined and entry is defined
if subindex in values["subindexes"] and "PARAMETERVALUE" in values["subindexes"][subindex]:
value = values["subindexes"][subindex]["PARAMETERVALUE"]
elif subindex in values["subindexes"] and "DEFAULTVALUE" in values["subindexes"][subindex]:
value = values["subindexes"][subindex]["DEFAULTVALUE"]
# Find default value for value type of the subindex
else:
value = GetDefaultValue(Node, entry, subindex)
Node.AddEntry(entry, subindex, value)
else:
raise SyntaxError, _("Array or Record entry 0x%4.4X must have a \"SubNumber\" attribute")%entry
return Node
except SyntaxError, message:
return _("Unable to import EDS file\n%s")%message
#-------------------------------------------------------------------------------
# Main Function
#-------------------------------------------------------------------------------
if __name__ == '__main__':
print ParseEDSFile("examples/PEAK MicroMod.eds")
| lgpl-2.1 | 6,767,115,358,776,499,000 | 50.621827 | 167 | 0.54012 | false |
ZirkCoin/ZirkCoin | share/qt/extract_strings_qt.py | 1294 | 1784 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
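# Example (illustrative):
# parse_po('msgid "hello"\nmsgstr ""\n')
# -> [(['"hello"'], ['""'])]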
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit | 5,940,951,416,686,363,000 | 24.485714 | 80 | 0.563341 | false |
iwarobots/aerodynamics | src/test_section.py | 1 | 1447 | #!/usr/bin/env python
from __future__ import absolute_import, division
from common import Model
# TODO: Refinement needed.
class TestSection(Model):
def __init__(self,
in_mach,
in_area,
in_p,
in_t,
p01,
t01,
z_len,
t_len):
self._in_mach = in_mach
self._in_area = in_area
self._in_p = in_p
self._in_t = in_t
self._p01 = p01
self._t01 = t01
self._z_len = z_len
self._t_len = t_len
@property
def t_len(self):
return self._t_len
@property
def in_mach(self):
return self._in_mach
@property
def in_area(self):
return self._in_area
@property
def in_p(self):
return self._in_p
@property
def in_t(self):
return self._in_t
@property
def p01(self):
return self._p01
@property
def p02(self):
return self.p01
def x2a(self, x):
return self._in_area
def x2y(self, x):
return self.x2a(x) / self._z_len / 2
def x2m(self, x):
return self.in_mach
def x2p(self, x):
return self.in_p / self.p01
@property
def t01(self):
return self._t01
def x2t(self, x):
return self.in_t / self.t01
def x2rho(self, x):
# ideal gas: rho/rho0 = (p/p0) / (T/T0)
return self.x2p(x) / self.x2t(x)
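# Example (illustrative, assumed SI values):
# ts = TestSection(in_mach=2.0, in_area=0.04, in_p=101325.0, in_t=300.0,
#                  p01=790000.0, t01=540.0, z_len=0.2, t_len=0.5)
# ts.x2m(0.1) -> 2.0 (Mach is constant across the test section)
# ts.x2p(0.1) -> 101325.0 / 790000.0 (static over stagnation pressure)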
| apache-2.0 | -5,986,436,818,786,117,000 | 17.551282 | 48 | 0.485142 | false |
CeltonMcGrath/TACTIC | 3rd_party/CherryPy/cherrypy/tutorial/tut06_default_method.py | 9 | 2118 | """
Tutorial - The default method
Request handler objects can implement a method called "default" that
is called when no other suitable method/object could be found.
Essentially, if CherryPy2 can't find a matching request handler object
for the given request URI, it will use the default method of the object
located deepest on the URI path.
Using this mechanism you can easily simulate virtual URI structures
by parsing the extra URI string, which you can access through
cherrypy.request.virtualPath.
The application in this tutorial simulates an URI structure looking
like /users/<username>. Since the <username> bit will not be found (as
there are no matching methods), it is handled by the default method.
"""
import cherrypy
class UsersPage:
def index(self):
# Since this is just a stupid little example, we'll simply
# display a list of links to random, made-up users. In a real
# application, this could be generated from a database result set.
return '''
<a href="./remi">Remi Delon</a><br/>
<a href="./hendrik">Hendrik Mans</a><br/>
<a href="./lorenzo">Lorenzo Lamas</a><br/>
'''
index.exposed = True
def default(self, user):
# Here we react depending on the virtualPath -- the part of the
# path that could not be mapped to an object method. In a real
# application, we would probably do some database lookups here
# instead of the silly if/elif/else construct.
if user == 'remi':
out = "Remi Delon, CherryPy lead developer"
elif user == 'hendrik':
out = "Hendrik Mans, CherryPy co-developer & crazy German"
elif user == 'lorenzo':
out = "Lorenzo Lamas, famous actor and singer!"
else:
out = "Unknown user. :-("
return '%s (<a href="./">back</a>)' % out
default.exposed = True
cherrypy.tree.mount(UsersPage())
if __name__ == '__main__':
import os.path
thisdir = os.path.dirname(__file__)
cherrypy.quickstart(config=os.path.join(thisdir, 'tutorial.conf'))
| epl-1.0 | -3,320,382,102,094,509,600 | 34.898305 | 74 | 0.653447 | false |
Jumpscale/jumpscale_core8 | lib/JumpScale/baselib/atyourservice81/Service.py | 1 | 30866 | from JumpScale import j
import capnp
import time
class Service:
def __init__(self, aysrepo, actor=None, model=None, name="", args={}, path=None):
"""
init from a template or from a model
"""
self.model = None
self._schema = None
self._path = ""
self._schema = None
self.name = name
self.aysrepo = aysrepo
self.logger = j.atyourservice.logger
if actor is not None:
try:
self._initFromActor(actor, args=args, name=name)
except:
# cleanup if init fails
self.delete()
raise
elif model is not None:
self.model = model
elif path is not None:
self.loadFromFS(path)
else:
raise j.exceptions.Input(
message="template or model needs to be specified when creating an actor", level=1, source="", tags="", msgpub="")
@property
def path(self):
if self._path == "":
relpath = self.model.dbobj.gitRepo.path
assert self.model.dbobj.gitRepo.url == self.aysrepo.git.remoteUrl
self._path = j.sal.fs.joinPaths(self.aysrepo.path, relpath)
return self._path
def _initFromActor(self, actor, name, args={}):
self.logger.info("init service %s from %s" % (name, actor.model.name))
if j.data.types.string.check(actor):
raise j.exceptions.RuntimeError("no longer supported, pass actor")
if actor is None:
raise j.exceptions.RuntimeError("service actor cannot be None")
self.model = self.aysrepo.db.services.new()
dbobj = self.model.dbobj
dbobj.name = name
dbobj.actorName = actor.model.dbobj.name
dbobj.actorKey = actor.model.key
dbobj.state = "new"
dbobj.dataSchema = actor.model.dbobj.serviceDataSchema
skey = "%s!%s" % (self.model.role, self.model.dbobj.name)
dbobj.gitRepo.url = self.aysrepo.git.remoteUrl
dbobj.gitRepo.path = j.sal.fs.joinPaths("services", skey)
# actions
actions = dbobj.init("actions", len(actor.model.dbobj.actions))
counter = 0
for action in actor.model.dbobj.actions:
actionnew = actions[counter]
actionnew.state = "new"
actionnew.actionKey = action.actionKey
actionnew.name = action.name
actionnew.log = action.log
actionnew.period = action.period
counter += 1
# set default value for arguments not specified in the blueprint
template = self.aysrepo.templateGet(actor.model.name)
for k, v in template.schemaHrd.items.items():
if k not in args:
args[k] = v.default
# input will always happen in process
args2 = self.input(args=args)
# print("%s:%s" % (self, args2))
if args2 is not None and j.data.types.dict.check(args2):
args = args2
if not j.data.types.dict.check(args):
raise j.exceptions.Input(message="result from input needs to be dict,service:%s" % self,
level=1, source="", tags="", msgpub="")
self._validate_service_args(args)
dbobj.data = j.data.capnp.getBinaryData(j.data.capnp.getObj(dbobj.dataSchema, args=args, name='Schema'))
# parents/producers
parent = self._initParent(actor, args)
if parent is not None:
fullpath = j.sal.fs.joinPaths(parent.path, skey)
newpath = j.sal.fs.pathRemoveDirPart(fullpath, self.aysrepo.path)
if j.sal.fs.exists(dbobj.gitRepo.path):
j.sal.fs.moveDir(dbobj.gitRepo.path, newpath)
dbobj.gitRepo.path = newpath
self._initProducers(actor, args)
self.save()
self.init()
# make sure we have the latest version of the model if something changed during init
self.reload()
# need to do this manually because execution of the input method is a bit special.
self.model.actions['input'].state = 'ok'
self.saveAll()
def _validate_service_args(self, args):
"""
validate the arguments passed to the service during initialization to be sure we don't pass undefined arguments.
"""
errors = []
schema = j.data.capnp.getSchemaFromText(self.model.dbobj.dataSchema)
for field in args:
normalizedfieldname = j.data.hrd.sanitize_key(field)
if normalizedfieldname not in schema.schema.fieldnames:
errors.append('- Invalid parameter [{field}] passed while creating {service}.\n'.format(
field=field,
service="%s!%s" % (self.model.role, self.model.dbobj.name)))
if errors:
msg = "The arguments passed to the service contains the following errors: \n" + "\n".join(errors)
msg += '\nDataSchema : {}'.format(self.model.dbobj.dataSchema)
raise j.exceptions.Input(msg)
def _initParent(self, actor, args):
if actor.model.dbobj.parent.actorRole is not "":
parent_role = actor.model.dbobj.parent.actorRole
# try to get the instance name from the args. Look for full actor name ('node.ssh') or just role (node)
# if none of the two is available in the args, don't use instance name and
# expect the parent service to be unique in the repo
parent_name = args.get(actor.model.dbobj.parent.argKey, args.get(parent_role, ''))
res = self.aysrepo.servicesFind(name=parent_name, actor='%s(\..*)?' % parent_role)
res = [s for s in res if s.model.role == parent_role]
if len(res) == 0:
if actor.model.dbobj.parent.optional:
return None
if actor.model.dbobj.parent.auto is False:
raise j.exceptions.Input(message="could not find parent:%s for %s, found 0" %
(parent_name, self), level=1, source="", tags="", msgpub="")
else:
auto_actor = self.aysrepo.actorGet(parent_role)
instance = j.data.idgenerator.generateIncrID('parent_%s' % parent_role)
res.append(auto_actor.serviceCreate(instance="auto_%d" % instance, args={}))
elif len(res) > 1:
raise j.exceptions.Input(message="could not find parent:%s for %s, found more than 1." %
(parent_name, self), level=1, source="", tags="", msgpub="")
parentobj = res[0]
self.model.dbobj.parent.actorName = parentobj.model.dbobj.actorName
self.model.dbobj.parent.key = parentobj.model.key
self.model.dbobj.parent.serviceName = parentobj.name
return parentobj
return None
def _initProducers(self, actor, args):
"""
Initialize the producers of an actor.
actor: the actor whose producers are being initialized.
args: arguments passed in the blueprint (i.e {'ssh1':'main', 'sshlist':[]} )
"""
# for every producer model in the producers, we get the services the user set under `argKey` to be consumed in the blueprint itself.
# calculate the difference between the available services and the user-set ones
# check the minimum required services and see if we should create new ones when auto is set
# create the services required until minServices is reached.
# add each to our producers and add ourselves to their consumers list.
# maintain the parent relationship (the parent is always a producer and we are always a consumer of the parent.)
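# Example (illustrative): for a producer with role 'ssh' and argKey 'ssh1',
# a blueprint arg {'ssh1': 'main'} links the existing service named 'main'
# as a producer of this service and this service as a consumer of 'main'.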
for producer_model in actor.model.dbobj.producers:
producer_role = producer_model.actorRole
usersetservices = []
passedservicesnames = args.get(producer_model.argKey, args.get(producer_role, ""))
if not j.data.types.list.check(passedservicesnames):
passedservicesnames = [passedservicesnames]
for svname in passedservicesnames:
if svname:
foundservices = self.aysrepo.servicesFind(name=svname, actor="%s(\..*)?" % producer_model.actorRole)
usersetservices.extend(foundservices)
available_services = self.aysrepo.servicesFind(actor=producer_role)
available_services = list(set(available_services)-set(usersetservices))
extraservices = len(usersetservices) - producer_model.maxServices
if extraservices > 0:
raise j.exceptions.Input(message="Specified services [%s] are more than maximum services: [%s]"%(str(usersetservices), str(producer_model.maxServices)),
level=1, source="", tags="", msgpub="")
tocreate = producer_model.minServices-len(available_services)-len(usersetservices)
if tocreate > 0:
if producer_model.auto:
for idx in range(tocreate):
auto_actor = self.aysrepo.actorGet(producer_role)
available_services.append(auto_actor.serviceCreate(instance="auto_%s" % idx, args={}))
else:
raise j.exceptions.Input(message="Minimum number of services required is %s and only %s are provided. [Hint: Maybe you want to set auto to auto create the missing services?]" % (producer_model.minServices, len(usersetservices)),
level=1, source="", tags="", msgpub="")
for idx, producer_obj in enumerate(usersetservices + available_services):
if idx >= len(usersetservices) and idx >= producer_model.minServices:
break
self.model.producerAdd(
actorName=producer_obj.model.dbobj.actorName,
serviceName=producer_obj.model.dbobj.name,
key=producer_obj.model.key)
# add ourself to the consumers list of the producer
producer_obj.model.consumerAdd(
actorName=self.model.dbobj.actorName,
serviceName=self.model.dbobj.name,
key=self.model.key)
if self.parent is not None:
# add parent to the producers list.
self.model.producerAdd(
actorName=self.parent.model.dbobj.actorName,
serviceName=self.parent.model.dbobj.name,
key=self.parent.model.key)
# add ourself to the consumers list of the parent
self.parent.model.consumerAdd(
actorName=self.model.dbobj.actorName,
serviceName=self.model.dbobj.name,
key=self.model.key)
def _check_args(self, actor, args):
""" Checks whether if args are the same as in instance model """
data = j.data.serializer.json.loads(self.model.dataJSON)
for key, value in args.items():
sanitized_key = j.data.hrd.sanitize_key(key)
if sanitized_key in data and data[sanitized_key] != value:
self.processChange(actor=actor, changeCategory="dataschema", args=args)
break
def loadFromFS(self, path):
"""
get content from fs and load in object
only for DR purposes, std from key value stor
"""
self.logger.debug("load service from FS: %s" % path)
if self.model is None:
self.model = self.aysrepo.db.services.new()
model_json = j.data.serializer.json.load(j.sal.fs.joinPaths(path, "service.json"))
# for now we don't reload the action codes.
# when using a distributed DB, the action code could still be available
actions_bak = model_json.pop('actions')
self.model.dbobj = self.aysrepo.db.services.capnp_schema.new_message(**model_json)
data_json = j.data.serializer.json.load(j.sal.fs.joinPaths(path, "data.json"))
self.model.dbobj.data = j.data.capnp.getBinaryData(j.data.capnp.getObj(self.model.dbobj.dataSchema, args=data_json))
# data_obj = j.data.capnp.getObj(self.model.dbobj.dataSchema, data_json)
# self.model._data = data_obj
# actions
# relink actions from the actor to be sure we have good keys
actor = self.aysrepo.actorGet(name=self.model.dbobj.actorName)
actions = self.model.dbobj.init("actions", len(actor.model.dbobj.actions))
counter = 0
for action in actor.model.dbobj.actions:
for backup_action in actions_bak:
if action.name == backup_action['name']:
break
actionnew = actions[counter]
actionnew.state = backup_action['state']
actionnew.actionKey = action.actionKey
actionnew.name = action.name
actionnew.log = backup_action['log']
actionnew.period = backup_action['period']
counter += 1
self.saveAll()
def saveToFS(self):
j.sal.fs.createDir(self.path)
path2 = j.sal.fs.joinPaths(self.path, "service.json")
j.sal.fs.writeFile(path2, self.model.dictJson, append=False)
path3 = j.sal.fs.joinPaths(self.path, "data.json")
j.sal.fs.writeFile(path3, self.model.dataJSON)
path4 = j.sal.fs.joinPaths(self.path, "schema.capnp")
j.sal.fs.writeFile(path4, self.model.dbobj.dataSchema)
def save(self):
self.model.save()
def saveAll(self):
self.model.save()
self.saveToFS()
def reload(self):
self.model._data = None
self.model.load(self.model.key)
def delete(self):
"""
delete this service completely.
remove it from db and from filesystem
all the children of this service are going to be deleted too
"""
# TODO should probably warn user relation may be broken
for prod_model in self.model.producers:
prod_model.consumerRemove(self)
for cons_model in self.model.consumers:
cons_model.producerRemove(self)
for service in self.children:
service.delete()
self.model.delete()
j.sal.fs.removeDirTree(self.path)
@property
def parent(self):
if self.model.parent is not None:
return self.model.parent.objectGet(self.aysrepo)
return None
@property
def parents(self):
chain = []
parent = self.parent
while parent is not None:
chain.append(parent)
parent = parent.parent
return chain
@property
def children(self):
res = []
for service in self.aysrepo.services:
if service.parent == self:
res.append(service)
return res
@property
def producers(self):
producers = {}
for prod_model in self.model.producers:
if prod_model.role not in producers:
producers[prod_model.role] = []
result = self.aysrepo.servicesFind(name=prod_model.dbobj.name, actor=prod_model.dbobj.actorName)
producers[prod_model.role].extend(result)
return producers
@property
def consumers(self):
consumers = {}
for prod_model in self.model.consumers:
if prod_model.role not in consumers:
consumers[prod_model.role] = []
result = self.aysrepo.servicesFind(name=prod_model.dbobj.name, actor=prod_model.dbobj.actorName)
consumers[prod_model.role].extend(result)
return consumers
def isConsumedBy(self, service):
consumers_keys = [model.key for model in self.model.consumers]
return service.model.key in consumers_keys
def findConsumersRecursive(self, target=None, out=None):
"""
@return set of services that consume target, recursively
"""
if out is None:
out = set()  # avoid sharing a mutable default set between calls
if target is None:
target = self
for service in target.consumers:
out.add(service)
self.findConsumersRecursive(service, out)
return out
def getProducersRecursive(self, producers=None, callers=None, action="", producerRoles="*"):
# use None defaults to avoid sharing mutable sets between calls
if producers is None:
producers = set()
if callers is None:
callers = set()
for role, producers_list in self.producers.items():
for producer in producers_list:
if action == "" or action in producer.model.actionsState.keys():
if producerRoles == "*" or producer.model.role in producerRoles:
producers.add(producer)
producers = producer.getProducersRecursive(producers=producers, callers=callers, action=action, producerRoles=producerRoles)
return producers.symmetric_difference(callers)
def printProducersRecursive(self, prefix=""):
for role, producers2 in self.producers.items():
# print ("%s%s"%(prefix,role))
for producer in producers2:
print("%s- %s" % (prefix, producer))
producer.printProducersRecursive(prefix + " ")
def getConsumersRecursive(self, consumers=None, callers=None, action="", consumerRole="*"):
# use None defaults to avoid sharing mutable sets between calls
if consumers is None:
consumers = set()
if callers is None:
callers = set()
for role, consumers_list in self.consumers.items():
for consumer in consumers_list:
if action == "" or action in consumer.model.actionsState.keys():
if consumerRole == "*" or consumer.model.role in consumerRole:
consumers.add(consumer)
consumers = consumer.getConsumersRecursive(
consumers=consumers, callers=callers, action=action, consumerRole=consumerRole)
return consumers.symmetric_difference(callers)
def getConsumersWaiting(self, action='uninstall', consumersChanged=None, scope=None):
if consumersChanged is None:
consumersChanged = set()  # avoid sharing a mutable default set between calls
for consumer in self.getConsumersRecursive(set(), set()):
# check that the action exists, no need to wait for other actions,
# apart from when init or install are not done
if consumer.model.actionsState['init'] != "ok":
consumersChanged.add(consumer)
if consumer.model.actionsState['install'] != "ok":
consumersChanged.add(consumer)
if action not in consumer.model.actionsState.keys():
continue
if consumer.model.actionsState[action] != "ok":
consumersChanged.add(consumer)
if scope is not None:
consumersChanged = consumersChanged.intersection(scope)
return consumersChanged
def consume(self, service):
"""
consume another service dynamically
"""
if service in self.producers:
return
self.model.producerAdd(
actorName=service.model.dbobj.actorName,
serviceName=service.name,
key=service.model.key)
# add ourself to the consumers list of the producer
service.model.consumerAdd(
actorName=self.model.dbobj.actorName,
serviceName=self.model.dbobj.name,
key=self.model.key)
self.saveAll()
service.saveAll()
@property
def executor(self):
return self._getExecutor()
def _getExecutor(self):
executor = None
tocheck = [self]
tocheck.extend(self.parents)
for service in tocheck:
if 'getExecutor' in service.model.actionsState.keys():
job = service.getJob('getExecutor')
executor = job.method(job)
return executor
return j.tools.executor.getLocal()
def processChange(self, actor, changeCategory, args={}):
"""
template action change
categories :
- dataschema
- ui
- config
- action_new_actionname
- action_mod_actionname
"""
# TODO: implement different pre-define action for each category
# self.logger.debug('process change for %s (%s)' % (self, changeCategory))
if changeCategory == 'dataschema':
# We use the args passed without change
pass
elif changeCategory == 'ui':
# TODO
pass
elif changeCategory == 'config':
# update the recurrin and event actions
# then set the lastrun to the value it was before update
recurring_lastrun = {}
event_lastrun = {}
for event in self.model.actionsEvent.values():
event_lastrun[event.action] = event.lastRun
for recurring in self.model.actionsRecurring.values():
recurring_lastrun[recurring.action] = recurring.lastRun
self._initRecurringActions(actor)
self._initEventActions(actor)
for action, lastRun in event_lastrun.items():
self.model.actionsEvent[action].lastRun = lastRun
for action, lastRun in recurring_lastrun.items():
self.model.actionsRecurring[action].lastRun = lastRun
elif changeCategory.find('action_new') != -1:
action_name = changeCategory.split('action_new_')[1]
actor_action_pointer = actor.model.actions[action_name]
self.model.actionAdd(key=actor_action_pointer.actionKey, name=action_name)
elif changeCategory.find('action_mod') != -1:
# update state and pointer of the action pointer in service model
action_name = changeCategory.split('action_mod_')[1]
action_actor_pointer = actor.model.actions[action_name]
service_action_pointer = self.model.actions[action_name]
service_action_pointer.state = 'changed'
service_action_pointer.actionKey = action_actor_pointer.actionKey
# update the lastModDate of the action object
action = j.core.jobcontroller.db.actions.get(key=service_action_pointer.actionKey)
action.dbobj.lastModDate = j.data.time.epoch
action.save()
elif changeCategory.find('action_del') != -1:
            action_name = changeCategory.split('action_del_')[1]
self.model.actionDelete(action_name)
# save the change for the service
self.saveAll()
# execute the processChange method if it exists
if 'processChange' in self.model.actions.keys():
args.update({'changeCategory': changeCategory})
job = self.getJob("processChange", args=args)
args = job.executeInProcess()
job.model.save()
def input(self, args={}):
job = self.getJob("input", args=args)
job._service = self
job.saveService = False # this is done to make sure we don't save the service at this point !!!
args = job.executeInProcess()
job.model.actorName = self.model.dbobj.actorName
job.model.save()
return args
def init(self):
job = self.getJob(actionName="init")
job.executeInProcess()
job.model.save()
return job
def checkActions(self, actions):
"""
        will walk over all actions, and make sure the defaults are well set.
"""
from IPython import embed
print("DEBUG NOW checkactions")
embed()
raise RuntimeError("stop debug here")
def scheduleAction(self, action, args={}, period=None, log=True, force=False):
"""
Change the state of an action so it marked as need to be executed
if the period is specified, also create a recurring period for the action
"""
self.logger.info('schedule action %s on %s' % (action, self))
if action not in self.model.actions:
raise j.exceptions.Input(
"Trying to schedule action %s on %s. but this action doesn't exist" % (action, self))
action_model = self.model.actions[action]
if action_model.state == 'disabled':
raise j.exceptions.Input("Trying to schedule action %s on %s. but this action is disabled" % (action, self))
if period is not None and period != '':
# convert period to seconds
if j.data.types.string.check(period):
period = j.data.types.duration.convertToSeconds(period)
elif j.data.types.int.check(period) or j.data.types.float.check(period):
period = int(period)
# save period into actionCode model
action_model.period = period
if not force and action_model.state == 'ok':
self.logger.info("action %s already in ok state, don't schedule again" % action_model.name)
else:
action_model.state = 'scheduled'
self.saveAll()
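    # Hedged usage sketch (not part of the original source, and assuming the
    # duration type parses '5m' as five minutes): duration strings are
    # converted to seconds before being stored on the action model:
    #
    #   svc.scheduleAction('monitor', period='5m')   # stored as period=300
    #   svc.scheduleAction('backup', period=3600)    # ints pass through as seconds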
def executeAction(self, action, args={}, inprocess=False):
if action[-1] == "_":
return self.executeActionService(action)
else:
return self.executeActionJob(action, args, inprocess=inprocess)
def executeActionService(self, action, args={}):
# execute an action in process without creating a job
        # useful for methods called very often.
action_id = self.model.actions[action].actionKey
action_model = j.core.jobcontroller.db.actions.get(action_id)
action_with_lines = ("\n %s \n" % action_model.code)
indented_action = '\n '.join(action_with_lines.splitlines())
complete_action = "def %s(%s): %s" % (action, action_model.argsText, indented_action)
exec(complete_action)
res = eval(action)(service=self, args=args)
return res
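    # Hedged illustration (not part of the original source): for an action
    # named 'ping' whose argsText is 'service, args={}', the exec() above
    # builds roughly the following wrapper, which eval(action) then calls
    # with service=self and the given args:
    #
    #   def ping(service, args={}):
    #       ...action_model.code, re-indented by four spaces...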
def executeActionJob(self, actionName, args={}, inprocess=False):
self.logger.debug('execute action %s on %s' % (actionName, self))
job = self.getJob(actionName=actionName, args=args)
# inprocess means we don't want to create subprocesses for this job
# used mainly for action called from other actions.
if inprocess:
job.model.dbobj.debug = True
now = j.data.time.epoch
p = job.execute()
if job.model.dbobj.debug is True:
return job
while not p.isDone():
time.sleep(0.5)
p.sync()
if p.new_stdout != "":
self.logger.info(p.new_stdout)
# just to make sure process is cleared
p.wait()
        # if the action is a recurring action, save the last execution time in the model
if actionName in self.model.actionsRecurring:
self.model.actionsRecurring[actionName].lastRun = now
service_action_obj = self.model.actions[actionName]
if p.state != 'success':
job.model.dbobj.state = 'error'
service_action_obj.state = 'error'
# processError creates the logs entry in job object
job._processError(p.error)
# print error
log = job.model.dbobj.logs[-1]
print(job.str_error(log.log))
else:
job.model.dbobj.state = 'ok'
service_action_obj.state = 'ok'
log_enable = j.core.jobcontroller.db.actions.get(service_action_obj.actionKey).dbobj.log
if log_enable:
if p.stdout != '':
job.model.log(msg=p.stdout, level=5, category='out')
if p.stderr != '':
job.model.log(msg=p.stderr, level=5, category='err')
self.logger.info("job {} done sucessfuly".format(str(job)))
job.model.save()
job.service.saveAll()
return job
def getJob(self, actionName, args={}):
action = self.model.actions[actionName]
jobobj = j.core.jobcontroller.db.jobs.new()
jobobj.dbobj.repoKey = self.aysrepo.model.key
jobobj.dbobj.actionKey = action.actionKey
jobobj.dbobj.actionName = action.name
jobobj.dbobj.actorName = self.model.dbobj.actorName
jobobj.dbobj.serviceName = self.model.dbobj.name
jobobj.dbobj.serviceKey = self.model.key
jobobj.dbobj.state = "new"
jobobj.dbobj.lastModDate = j.data.time.epoch
jobobj.args = args
job = j.core.jobcontroller.newJobFromModel(jobobj)
return job
def _build_actions_chain(self, action):
"""
        this method returns a list of actions that need to happen before the action passed in argument
can start
"""
ds = list()
self.model._build_actions_chain(ds=ds)
ds.reverse()
return ds
def __eq__(self, service):
if not service:
return False
return service.model.key == self.model.key
def __hash__(self):
return hash(self.model.key)
def __repr__(self):
return "service:%s!%s" % (self.model.role, self.model.dbobj.name)
def __str__(self):
return self.__repr__()
def _getDisabledProducers(self):
disabled = []
for producers_list in self.producers.values():
for producer in producers_list:
if producer.model.dbobj.state == 'disabled':
disabled.append(producer)
return disabled
# def disable(self):
# for consumer in self.getConsumers():
# candidates = self.aysrepo.findServices(role=self.model.role, first=False)
# if len(candidates) > 1:
# # Other candidates available. Should link consumer to new
# # candidate
# candidates.remove(self)
# candidate = candidates[0]
# producers = consumer.hrd.getList('producer.%s' % self.role, [])
# producers.remove(self.key)
# producers.append(candidate.key)
# consumer.hrd.set('producer.%s' % self.role, producers)
# else:
# # No other candidates already installed. Disable consumer as
# # well.
# consumer.disable()
#
# self.log("disable instance")
# self.model.hrd.set('disabled', True)
#
# def _canBeEnabled(self):
# for role, producers in list(self.producers.items()):
# for producer in producers:
# if producer.state.hrd.getBool('disabled', False):
# return False
# return True
#
# def enable(self):
# # Check that all dependencies are enabled
#
# if not self._canBeEnabled():
# self.log(
# "%s cannot be enabled because one or more of its producers is disabled" % self)
# return
#
# self.model.hrd.set('disabled', False)
# self.log("Enable instance")
# for consumer in self._getConsumers(include_disabled=True):
# consumer.enable()
# consumer.start()
#
| apache-2.0 | 1,746,238,077,986,849,800 | 39.242503 | 248 | 0.592918 | false |
dstufft/ooni-backend | oonib/otime.py | 1 | 2318 | import time
from oonib import errors as e
from datetime import datetime
def utcDateNow():
"""
Returns the datetime object of the current UTC time.
"""
return datetime.utcnow()
def utcTimeNow():
"""
Returns seconds since epoch in UTC time, it's of type float.
"""
return time.mktime(time.gmtime())
def dateToTime(date):
"""
Takes as input a datetime object and outputs the seconds since epoch.
"""
return time.mktime(date.timetuple())
def prettyDateNow():
"""
Returns a good looking string for the local time.
"""
return datetime.now().ctime()
def utcPrettyDateNow():
"""
Returns a good looking string for utc time.
"""
return datetime.utcnow().ctime()
def timeToPrettyDate(time_val):
return time.ctime(time_val)
def fromTimestamp(s):
"""
Converts a string that is output from the timestamp function back to a
datetime object
Args:
        s (str): an ISO8601 formatted string,
                 e.g. "1912-06-23T101234Z"
Note: we currently only support parsing strings that are generated from the
timestamp function and have no intention in supporting the full
standard.
"""
try:
date_part, time_part = s.split('T')
hours, minutes, seconds = time_part[:2], time_part[2:4], time_part[4:6]
year, month, day = date_part.split('-')
    except ValueError:
raise e.InvalidTimestampFormat(s)
return datetime(int(year),
int(month),
int(day),
int(hours),
int(minutes),
int(seconds))
def timestamp(t=None):
"""
The timestamp for ooni reports follows ISO 8601 in
UTC time format.
    We do not include ':' and we do include seconds.
Example:
if the current date is "10:12:34 AM, June 23 1912" (datetime(1912, 6,
23, 10, 12, 34))
the timestamp will be:
"1912-06-23T101234Z"
Args:
t (datetime): a datetime object representing the
time to be represented (*MUST* be expressed
in UTC).
If not specified will default to the current time
in UTC.
"""
if t is None:
t = datetime.utcnow()
ISO8601 = "%Y-%m-%dT%H%M%SZ"
return t.strftime(ISO8601)
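# Hedged round-trip sketch (not part of the original source): timestamp() and
# fromTimestamp() are inverses down to second precision:
#
#   >>> from datetime import datetime
#   >>> timestamp(datetime(1912, 6, 23, 10, 12, 34))
#   '1912-06-23T101234Z'
#   >>> fromTimestamp('1912-06-23T101234Z')
#   datetime.datetime(1912, 6, 23, 10, 12, 34)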
| bsd-2-clause | -3,563,289,505,155,473,000 | 22.414141 | 79 | 0.595341 | false |
Alwnikrotikz/cortex-vfx | python/IECoreMaya/ParameterUI.py | 12 | 14067 | ##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds as cmds
import maya.mel
import maya.OpenMaya
import IECore
import IECoreMaya
## Base class for objects which are able to create an Attribute Editor widget for a single IECore.Parameter
# held on an IECoreMaya.ParameterisedHolder node.
# \todo Separate control drawing from labelling and layout, so these classes just create the right
# hand side of what they're doing at the moment. Then we can use them in different layouts like spreadsheets
# and wotnot.
class ParameterUI( IECoreMaya.UIElement ) :
textColumnWidthIndex = 145
singleWidgetWidthIndex = 70
sliderWidgetWidthIndex = 2 * 70
handlers = {}
## The parameterisedHolderNode is an MObject specifying the node holding the specified IECore.Parameter.
# Derived class __init__ implementations must create a layout to hold all their contents and pass this
# in the topLevelUI parameter (as for all UIElement derived classes).
# \todo Document the meaning of the various keyword arguments - perhaps the names of these should be
# prefixed with the name of the class which implements each argument so as to make it easier to find
# the documentation too.
def __init__( self, parameterisedHolderNode, parameter, topLevelUI, **kw ) :
IECoreMaya.UIElement.__init__( self, topLevelUI )
self.__node = maya.OpenMaya.MObjectHandle( parameterisedHolderNode )
self.parameter = parameter #IECore.Parameter
self.__labelWithNodeName = kw.get( "labelWithNodeName", False )
self.__longParameterName = kw.get( "longParameterName", parameter.name )
## Derived classes should override this method. The override should first call the base class method and
# then reconnect all created widgets to the new node/parameter. The node and parameter arguments are as
# for the __init__ function.
def replace( self, node, parameter ) :
self.__node = maya.OpenMaya.MObjectHandle( node )
self.parameter = parameter
## Returns the Maya node associated with this UI in the form of an OpenMaya.MObject
def node( self ) :
if not self.__node.isValid() :
raise RuntimeError, "IECoreMaya.ParameterUI.node(): The requested node is not valid"
return self.__node.object()
## Returns an umambiguous full path for the Maya node associated with this UI.
def nodeName( self ) :
fnPH = IECoreMaya.FnParameterisedHolder( self.node() )
return fnPH.fullPathName()
## Returns the Maya plug associated with this UI in the form an OpenMaya.MPlug
def plug( self ) :
fnPH = IECoreMaya.FnParameterisedHolder( self.node() )
return fnPH.parameterPlug( self.parameter )
## Returns an unambiguous full path to the plug this ui represents.
def plugName( self ) :
fnPH = IECoreMaya.FnParameterisedHolder( self.node() )
plug = fnPH.parameterPlug( self.parameter )
return str( fnPH.fullPathName() + "." + plug.partialName() )
def layout( self ) :
return self._topLevelUI()
## Computes a nice label for the ui.
def label( self ):
if self.__labelWithNodeName :
n = self.nodeName() + "." + self.__longParameterName
if not self.__longParameterName :
# Top-level parameter comes through into here without a name
n = self.nodeName() + ".parameters"
return IECoreMaya.mel( "interToUI(\"" + n + "\")" ).value
else :
return IECoreMaya.mel( "interToUI(\"" + self.parameter.name + "\")" ).value
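    # Hedged illustration (not part of the original source): Maya's interToUI
    # MEL helper capitalises and splits internal names, so a parameter named
    # "fileName" is labelled "File Name" in the attribute editor.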
## Computes a wrapped annotation/tooltip for the ui
def description( self ):
extended = "%s\n\n%s" % ( self.plugName().split(".")[1], self.parameter.description )
return IECore.StringUtil.wrap( extended, 48 )
@staticmethod
def _defaultDragCallback( dragControl, x, y, modifiers, **kw ):
# Pass the dictionary of arguments as a string so that it can be captured and eval'ed in the drop callback
return [ 'ParameterUI', repr( kw ) ]
def addDragCallback( self, ctrl, **kw ) :
maya.cmds.control(
ctrl,
edit = True,
dragCallback = IECore.curry( ParameterUI._defaultDragCallback, nodeName = self.nodeName(), layoutName = self.layout(), **kw )
)
## Can be called by derived classes to add a useful popup menu to the specified ui element. This
# will replace any existing popup menus that are already there.
## \todo Understand and document the available keyword arguments. I think the only one is "attributeName",
# which is used to allow the name of specific elements of compound plugs to be specified to improve the box
# and vector uis. That needs rethinking in any case, as we shouldn't be storing attribute names anywhere as it
# makes us vulnerable to the names changing behind our backs.
def _addPopupMenu( self, parentUI, **kw ) :
existingMenus = maya.cmds.control( parentUI, query=True, popupMenuArray=True )
if existingMenus :
for m in existingMenus :
maya.cmds.deleteUI( m, menu=True )
IECoreMaya.createMenu( definition = IECore.curry( self.__popupMenuDefinition, **kw ), parent = parentUI, useInterToUI=False )
if "button1" in kw and kw["button1"] :
IECoreMaya.createMenu( definition = IECore.curry( self.__popupMenuDefinition, **kw ), parent = parentUI, button = 1, useInterToUI=False )
## Returns an IECore.MenuDefinition used to create a popup menu for the ParameterUI. This may
# be overridden by derived classes to add their own menu items. In this case they should first
# call the base class implementation before adding their items to the result.
def _popupMenuDefinition( self, **kw ) :
definition = IECore.MenuDefinition()
if cmds.getAttr( kw['attributeName'], lock = True) == 0:
settable = maya.cmds.getAttr( kw["attributeName"], settable=True )
if settable :
# make menu items for all presets and for the default value
for k in self.parameter.presetNames() :
definition.append( "/" + k, { "command" : IECore.curry( self.__selectValue, selection = k ) } )
if len( self.parameter.presetNames() ) > 0 :
definition.append( "/PresetDivider", { "divider" : True } )
definition.append( "/Default", { "command" : IECore.curry( self.__selectValue, selection = self.parameter.defaultValue ) } )
definition.append( "/ValueDivider", { "divider" : True } )
attrType = cmds.getAttr( kw["attributeName"], type=True )
if attrType in ( "float", "long" ) :
if cmds.getAttr( kw['attributeName'], keyable=True) and settable :
definition.append( "/Set Key", { "command" : IECore.curry( self.__setKey, **kw ) } )
expressions = cmds.listConnections(
kw['attributeName'],
d = False,
s = True,
type = "expression"
)
if not expressions :
hasConnections = self.__appendConnectionMenuDefinitions( definition, **kw )
if not hasConnections and settable :
definition.append( "/Create New Expression...", { "command" : IECore.curry( self.__expressionEditor, **kw ) } )
else:
definition.append( "/Edit Expression...", { "command" : IECore.curry( self.__expressionEditor, **kw ) } )
definition.append( "/Delete Expression", { "command" : IECore.curry( self.__deleteNode, nodeName = expressions[0] ) } )
else :
self.__appendConnectionMenuDefinitions( definition, **kw )
definition.append( "/ConnectionDivider", { "divider" : True } )
definition.append( "/Lock Attribute", { "command" : IECore.curry( self.__lock, **kw ) } )
else :
definition.append( "/Unlock Attribute", { "command" : IECore.curry( self.__unlock, **kw ) } )
return definition
def __appendConnectionMenuDefinitions( self, definition, **kw ) :
connections = cmds.listConnections(
kw['attributeName'],
d = False,
s = True,
plugs = True,
connections = True,
skipConversionNodes = True
)
definition.append( "/Connection Editor...", { "command" : IECore.curry( self.__connectionEditor ) } )
if connections :
definition.append( "/Open AE...",
{ "command" : IECore.curry( self.__showEditor, attributeName = connections[1] ) }
)
definition.append( "/Break Connection",
{
"command" : IECore.curry(
self.__disconnect,
source = connections[1],
destination = connections[0],
refreshAE = self.nodeName()
)
}
)
return True
else:
return False
def __popupMenuDefinition( self, **kw ) :
# call the protected function which can be overridden by
# derived classes. then let the callbacks do what they want.
definition = self._popupMenuDefinition( **kw )
for cb in self.__popupMenuCallbacks :
cb( definition, self.parameter, self.node() )
return definition
def __showEditor( self, attributeName ) :
split = attributeName.split('.', 1 )
node = split[0]
melCmd = 'showEditor "' + node + '"'
IECoreMaya.mel( melCmd.encode('ascii') )
def __deleteNode( self, nodeName = None ) :
cmds.delete( nodeName )
def __expressionEditor( self, attributeName = None ) :
split = attributeName.split('.', 1 )
node = split[0]
attr = split[1]
melCmd = 'expressionEditor EE "' + node + '" "' + attr + '"'
IECoreMaya.mel( melCmd.encode('ascii') )
def __connectionEditor( self ) :
maya.mel.eval(
str("ConnectionEditor;"+
"nodeOutliner -e -replace %(right)s connectWindow|tl|cwForm|connectWindowPane|rightSideCW;"+
"connectWindowSetRightLabel %(right)s;") % { 'right' : self.nodeName() } )
def __disconnect( self, source = None, destination = None, refreshAE = None ) :
cmds.disconnectAttr( source, destination )
if refreshAE :
maya.mel.eval( 'evalDeferred( "updateAE %s;")' % refreshAE )
def __setKey( self, **kw ):
cmds.setKeyframe(
kw['attributeName']
)
def __lock( self, **kw ):
cmds.setAttr(
kw['attributeName'],
lock = True
)
def __unlock( self, **kw ):
cmds.setAttr(
kw['attributeName'],
lock = False
)
def __selectValue( self, selection = None):
self.parameter.setValue( selection )
IECoreMaya.FnParameterisedHolder( self.node() ).setNodeValue( self.parameter )
@staticmethod
def registerUI( parameterTypeId, handlerType, uiTypeHint = None ):
key = (parameterTypeId, uiTypeHint)
if key in ParameterUI.handlers :
IECore.msg( IECore.Msg.Level.Warning, "ParameterUI.registerUI", "Handler for %s already registered." % str( key ) )
ParameterUI.handlers[key] = handlerType
## Returns a new ParameterUI instance suitable for representing
# the specified parameter on the specified parameterisedHolderNode.
# The node may either be specified as an OpenMaya.MObject or as
# a string or unicode object representing the node name.
@staticmethod
def create( parameterisedHolderNode, parameter, **kw ) :
if not isinstance( parameterisedHolderNode, maya.OpenMaya.MObject ) :
parameterisedHolderNode = IECoreMaya.StringUtil.dependencyNodeFromString( parameterisedHolderNode )
if not parameter.isInstanceOf( IECore.Parameter.staticTypeId() ) :
raise TypeError( "Parameter argument must derive from IECore.Parameter." )
if parameter.presetsOnly and len( parameter.presets() ) :
return IECoreMaya.PresetsOnlyParameterUI( parameterisedHolderNode, parameter, **kw )
uiTypeHint = None
try:
uiTypeHint = parameter.userData()['UI']['typeHint'].value
except:
pass
handlerType = None
typeId = parameter.typeId()
while typeId!=IECore.TypeId.Invalid :
handlerType = ParameterUI.handlers.get( ( typeId, uiTypeHint ), None )
if handlerType is not None :
break
handlerType = ParameterUI.handlers.get( ( typeId, None ), None )
if handlerType is not None :
break
typeId = IECore.RunTimeTyped.baseTypeId( typeId )
if handlerType is None :
IECore.msg( IECore.Msg.Level.Warning, "ParameterUI.create", "No UI registered for parameters of type \"%s\"" % parameter.typeName() )
return None
if 'longParameterName' in kw and len( kw['longParameterName'] ) :
kw['longParameterName'] += "." + parameter.name
else :
kw['longParameterName'] = parameter.name
parameterUI = handlerType( parameterisedHolderNode, parameter, **kw )
return parameterUI
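    # Hedged usage sketch (not part of the original source): `holderNode` and
    # `op` below are placeholders for a real holder node name and the
    # parameterised object it holds:
    #
    #   ui = ParameterUI.create(holderNode, op.parameters['someParameter'])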
__popupMenuCallbacks = []
## Registers a callback which is able to modify the popup menus associated
# with ParameterUIs. The callback should have the following signature :
#
# callback( menuDefinition, parameter, holderNode ).
@classmethod
def registerPopupMenuCallback( cls, callback ) :
cls.__popupMenuCallbacks.append( callback )
| bsd-3-clause | 190,147,296,433,490,340 | 34.255639 | 140 | 0.699296 | false |
jaggu303619/asylum | openerp/addons/portal_crm/__openerp__.py | 55 | 1734 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal CRM',
'version': '0.1',
'complexity': 'easy',
'description': """
This module adds a contact page (with a contact form creating a lead when submitted) to your portal if crm and portal are installed.
====================================================================================================================================
""",
'author': 'OpenERP SA',
'depends': ['crm','portal'],
'data': [
'contact_view.xml',
],
'test': [
'test/contact_form.yml',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
'css': ['static/src/css/portal_crm.css'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,797,731,151,924,660,000 | 37.533333 | 132 | 0.546136 | false |
JuliBakagianni/META-SHARE | misc/tools/generateDS-2.7a/generate_coverage.py | 33 | 3800 | #!/usr/bin/env python
#
# Imports
import sys
from optparse import OptionParser
import os
import re
#
# Globals and constants
EXCLUDE_BASE = [
'MixedContainer',
'_MemberSpec',
]
PATTERN = "^class\s*(\w*)"
RE_PATTERN = re.compile(PATTERN)
#
# Functions for external use
def generate_coverage(outfile, infilename, options):
exclude_classes = EXCLUDE_BASE
exclude_classes.extend(options.exclude_additional.split())
wrt = outfile.write
mod_name = get_mod_name(infilename)
wrt(HEADER % (mod_name, ))
generate_coverage_1(wrt, infilename, mod_name, exclude_classes)
wrt(FOOTER)
def generate_coverage_1(wrt, infilename, mod_name, exclude_classes):
infile = open(infilename, 'r')
for line in infile:
mo = RE_PATTERN.search(line)
if mo:
name = mo.group(1)
if (name not in exclude_classes and
not name.startswith('Sax')):
wrt(" '%s': %s.%s,\n" % (name, mod_name, name, ))
infile.close()
def get_mod_name(infilename):
s1 = os.path.split(infilename)[1]
s2 = os.path.splitext(s1)[0]
return s2
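# Hedged example (not part of the original source): the module name is the
# basename of the input file without its extension, e.g.
# get_mod_name('gen/foolib.py') returns 'foolib'.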
#
# Classes
#
# Functions for internal use and testing
#
# Templates
HEADER = '''\
#!/usr/bin/env python
#
# Imports
import sys
from optparse import OptionParser
import %s
#
# Globals and constants
CLASSES = {
'''
FOOTER = '''\
}
#
# Functions for external use
#
# Classes
#
# Functions for internal use and testing
def test(verbose):
instances = []
for name, class_ in CLASSES.iteritems():
instance = class_()
instances.append(instance)
return instances
USAGE_TEXT = """
python %prog [options] <somefile.xxx>
example:
python %prog somefile.xxx"""
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="produce additional (verbose) output")
(options, args) = parser.parse_args()
if len(args) != 0:
usage(parser)
test(options.verbose)
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
'''
# End Templates
#
USAGE_TEXT = """
Generate a dictionary of class names and classes from a (superclass)
module generated by generateDS.py.
python %prog [options] <somefile.py>
Example:
python %prog somefile.py"""
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option("-x", "--exclude-additional", type="string",
dest="exclude_additional", default='',
help="additional class names to be excluded (blank separated).")
parser.add_option("-f", "--force", action="store_true",
dest="force", default=False,
help="force over-write of outfile without asking.")
(options, args) = parser.parse_args()
outfilename = None
if len(args) == 2:
infilename = args[0]
outfilename = args[1]
elif len(args) == 1:
infilename = args[0]
outfilename = None
else:
usage(parser)
if outfilename is None:
outfile = sys.stdout
else:
if os.path.exists(outfilename):
if options.force:
outfile = open(outfilename, 'w')
else:
sys.stderr.write('Outfile (%s) exists. '
'Use -f (or --force) to override.\n' %
(outfilename, ))
sys.exit(1)
else:
outfile = open(outfilename, 'w')
generate_coverage(outfile, infilename, options)
if outfilename is not None:
outfile.close()
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
| bsd-3-clause | 802,192,059,007,941,100 | 18.28934 | 72 | 0.594474 | false |
cernops/nova | nova/tests/functional/api_sample_tests/test_networks_associate.py | 9 | 3149 | # Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = "os-networks-associate"
extra_extensions_to_load = ["os-networks"]
_sentinel = object()
def _get_flags(self):
f = super(NetworksAssociateJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Networks_associate requires Networks to be update
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.os_networks.Os_networks')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.networks_associate.'
'Networks_associate')
return f
def setUp(self):
super(NetworksAssociateJsonTests, self).setUp()
def fake_associate(self, context, network_id,
host=NetworksAssociateJsonTests._sentinel,
project=NetworksAssociateJsonTests._sentinel):
return True
self.stub_out("nova.network.api.API.associate", fake_associate)
def test_disassociate(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
def test_disassociate_host(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
def test_disassociate_project(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
def test_associate_host(self):
response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
| apache-2.0 | -847,080,795,879,207,200 | 38.860759 | 78 | 0.618927 | false |
linvictor88/vse-lbaas-driver | quantum/plugins/bigswitch/tests/test_server.py | 3 | 6568 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
"""Test server mocking a REST based network ctrl.
Used for QuantumRestProxy tests
"""
import json
import re
from wsgiref.simple_server import make_server
class TestNetworkCtrl(object):
def __init__(self, host='', port=8000,
default_status='404 Not Found',
default_response='404 Not Found',
debug=False):
self.host = host
self.port = port
self.default_status = default_status
self.default_response = default_response
self.debug = debug
self.debug_env = False
self.debug_resp = False
self.matches = []
def match(self, prior, method_regexp, uri_regexp, handler, data=None,
multi=True):
"""Add to the list of exptected inputs.
The incoming request is matched in the order of priority. For same
priority, match the oldest match request first.
        :param prior: integer priority of this match (e.g. 100)
:param method_regexp: regexp to match method (e.g. 'PUT|POST')
:param uri_regexp: regexp to match uri (e.g. '/quantum/v?.?/')
:param handler: function with signature:
lambda(method, uri, body, **kwargs) : status, body
where
- method: HTTP method for this request
- uri: URI for this HTTP request
- body: body of this HTTP request
- kwargs are:
- data: data object that was in the match call
- node: TestNetworkCtrl object itself
- id: offset of the matching tuple
and return values is:
(status, body) where:
- status: HTTP resp status (e.g. '200 OK').
If None, use default_status
- body: HTTP resp body. If None, use ''
"""
        assert int(prior) == prior, 'Priority should be an integer >= 0'
        assert prior >= 0, 'Priority should be an integer >= 0'
lo, hi = 0, len(self.matches)
while lo < hi:
mid = (lo + hi) // 2
            # compare against the stored priority, not the whole tuple
            if prior < self.matches[mid][0]:
hi = mid
else:
lo = mid + 1
self.matches.insert(lo, (prior, method_regexp, uri_regexp, handler,
data, multi))
def remove_id(self, id_):
assert id_ >= 0, 'remove_id: id < 0'
assert id_ <= len(self.matches), 'remove_id: id > len()'
self.matches.pop(id_)
def request_handler(self, method, uri, body):
retstatus = self.default_status
retbody = self.default_response
for i in xrange(len(self.matches)):
(prior, method_regexp, uri_regexp, handler, data, multi) = \
self.matches[i]
if re.match(method_regexp, method) and re.match(uri_regexp, uri):
kwargs = {
'data': data,
'node': self,
'id': i,
}
retstatus, retbody = handler(method, uri, body, **kwargs)
if multi is False:
self.remove_id(i)
break
if retbody is None:
retbody = ''
return (retstatus, retbody)
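    # Hedged usage sketch (not part of the original source): registering a
    # one-shot match and dispatching a request against it:
    #
    #   ctrl = TestNetworkCtrl()
    #   ctrl.match(100, 'GET', '/test',
    #              lambda m, u, b, **k: ('200 OK', '["ok"]'), multi=False)
    #   ctrl.request_handler('GET', '/test', None)   # -> ('200 OK', '["ok"]')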
def server(self):
def app(environ, start_response):
uri = environ['PATH_INFO']
method = environ['REQUEST_METHOD']
headers = [('Content-type', 'text/json')]
content_len_str = environ['CONTENT_LENGTH']
content_len = 0
request_data = None
if content_len_str:
content_len = int(content_len_str)
request_data = environ.get('wsgi.input').read(content_len)
if request_data:
try:
request_data = json.loads(request_data)
except Exception:
# OK for it not to be json! Ignore it
pass
if self.debug:
print '\n'
if self.debug_env:
print '%s:' % 'environ:'
for (key, value) in sorted(environ.iteritems()):
print ' %16s : %s' % (key, value)
print '%s %s' % (method, uri)
if request_data:
print '%s' % (
json.dumps(request_data, sort_keys=True, indent=4))
            # hand the parsed request body to the matched handler
            status, body = self.request_handler(method, uri, request_data)
body_data = None
if body:
try:
body_data = json.loads(body)
except Exception:
# OK for it not to be json! Ignore it
pass
start_response(status, headers)
if self.debug:
if self.debug_env:
print '%s: %s' % ('Response',
json.dumps(body_data, sort_keys=True, indent=4))
return body
return make_server(self.host, self.port, app)
def run(self):
print "Serving on port %d ..." % self.port
try:
self.server().serve_forever()
except KeyboardInterrupt:
pass
if __name__ == "__main__":
import sys
port = 8899
if len(sys.argv) > 1:
port = int(sys.argv[1])
debug = False
if len(sys.argv) > 2:
if sys.argv[2].lower() in ['debug', 'true']:
debug = True
ctrl = TestNetworkCtrl(port=port,
default_status='200 OK',
default_response='{"status":"200 OK"}',
debug=debug)
ctrl.match(100, 'GET', '/test',
lambda m, u, b, **k: ('200 OK', '["200 OK"]'))
ctrl.run()
| apache-2.0 | -971,353,842,177,792,800 | 34.502703 | 78 | 0.515073 | false |
sankalpg/Essentia_tonicDebug_TEMP | src/python/essentia/extractor/segmentation_simple.py | 10 | 2872 | # Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import essentia
import numpy
import sys
from essentia import INFO
from essentia.progress import Progress
namespace = 'lowlevel'
dependencies = None
def is_silent_threshold(frame, silence_threshold_dB):
p = essentia.instantPower( frame )
silence_threshold = pow(10.0, (silence_threshold_dB / 10.0))
if p < silence_threshold:
return 1.0
else:
return 0.0
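# Hedged illustration (not part of the original source): the dB threshold maps
# to linear power as 10**(dB/10), so a -60 dB threshold corresponds to a
# linear power of 1e-6:
#
#   >>> pow(10.0, -60.0 / 10.0)
#   1e-06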
def compute(audio, pool, options):
# analysis parameters
sampleRate = options['sampleRate']
frameSize = options['frameSize']
hopSize = options['hopSize']
windowType = options['windowType']
# frame algorithms
frames = essentia.FrameGenerator(audio = audio, frameSize = frameSize, hopSize = hopSize)
window = essentia.Windowing(size = frameSize, zeroPadding = 0, type = windowType)
spectrum = essentia.Spectrum(size = frameSize)
# spectral algorithms
energy = essentia.Energy()
mfcc = essentia.MFCC(highFrequencyBound = 8000)
INFO('Computing Low-Level descriptors necessary for segmentation...')
# used for a nice progress display
total_frames = frames.num_frames()
n_frames = 0
start_of_frame = -frameSize*0.5
progress = Progress(total = total_frames)
for frame in frames:
frameScope = [ start_of_frame / sampleRate, (start_of_frame + frameSize) / sampleRate ]
#pool.setCurrentScope(frameScope)
pool.add(namespace + '.' + 'scope', frameScope)
if options['skipSilence'] and essentia.isSilent(frame):
total_frames -= 1
start_of_frame += hopSize
continue
frame_windowed = window(frame)
frame_spectrum = spectrum(frame_windowed)
# need the energy for getting the thumbnail
pool.add(namespace + '.' + 'spectral_energy', energy(frame_spectrum))
# mfcc
(frame_melbands, frame_mfcc) = mfcc(frame_spectrum)
pool.add(namespace + '.' + 'spectral_mfcc', frame_mfcc)
# display of progress report
progress.update(n_frames)
n_frames += 1
start_of_frame += hopSize
progress.finish()
| agpl-3.0 | -1,822,485,630,926,241,500 | 30.911111 | 95 | 0.685585 | false |
shubhdev/edx-platform | common/test/acceptance/tests/studio/test_studio_settings_details.py | 4 | 7591 | """
Acceptance tests for Studio's Settings Details pages
"""
from unittest import skip
from .base_studio_test import StudioCourseTest
from ...fixtures.course import CourseFixture
from ...pages.studio.settings import SettingsPage
from ...pages.studio.overview import CourseOutlinePage
from ...tests.studio.base_studio_test import StudioCourseTest
from ..helpers import (
generate_course_key,
select_option_by_value,
is_option_value_selected,
element_has_text,
)
class SettingsMilestonesTest(StudioCourseTest):
"""
Tests for milestones feature in Studio's settings tab
"""
def setUp(self, is_staff=True):
super(SettingsMilestonesTest, self).setUp(is_staff=is_staff)
self.settings_detail = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Before every test, make sure to visit the page first
self.settings_detail.visit()
self.assertTrue(self.settings_detail.is_browser_on_page())
def test_page_has_prerequisite_field(self):
"""
Test to make sure page has pre-requisite course field if milestones app is enabled.
"""
self.assertTrue(self.settings_detail.pre_requisite_course_options)
def test_prerequisite_course_save_successfully(self):
"""
        Scenario: Selecting a course from the Pre-Requisite course drop down saves the selected course as the
        pre-requisite course.
Given that I am on the Schedule & Details page on studio
When I select an item in pre-requisite course drop down and click Save Changes button
Then My selected item should be saved as pre-requisite course
        And My selected item should be selected after refreshing the page.
"""
course_number = self.unique_id
CourseFixture(
org='test_org',
number=course_number,
run='test_run',
display_name='Test Course' + course_number
).install()
pre_requisite_course_key = generate_course_key(
org='test_org',
number=course_number,
run='test_run'
)
pre_requisite_course_id = unicode(pre_requisite_course_key)
# Refresh the page to load the new course fixture and populate the prrequisite course dropdown
# Then select the prerequisite course and save the changes
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again and confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
))
# Set the prerequisite course back to None and save the changes
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the None selection is properly reflected
self.settings_detail.refresh_page()
self.settings_detail.wait_for_prerequisite_course_options()
self.assertTrue(is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=''
))
# Re-pick the prerequisite course and confirm no errors are thrown (covers a discovered bug)
select_option_by_value(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.settings_detail.save_changes()
self.assertEqual(
'Your changes have been saved.',
self.settings_detail.alert_confirmation_title.text
)
# Refresh the page again to confirm the prerequisite course selection is properly reflected
self.settings_detail.refresh_page()
dropdown_status = is_option_value_selected(
browser_query=self.settings_detail.pre_requisite_course_options,
value=pre_requisite_course_id
)
self.assertTrue(dropdown_status)
def test_page_has_enable_entrance_exam_field(self):
"""
Test to make sure page has 'enable entrance exam' field.
"""
self.assertTrue(self.settings_detail.entrance_exam_field)
@skip('Passes in devstack, passes individually in Jenkins, fails in suite in Jenkins.')
def test_enable_entrance_exam_for_course(self):
"""
Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
And also that the entrance exam is destroyed after deselecting the checkbox.
"""
self.settings_detail.require_entrance_exam(required=True)
self.settings_detail.save_changes()
# getting the course outline page.
course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
course_outline_page.visit()
# title with text 'Entrance Exam' should be present on page.
self.assertTrue(element_has_text(
page=course_outline_page,
css_selector='span.section-title',
text='Entrance Exam'
))
# Delete the currently created entrance exam.
self.settings_detail.visit()
self.settings_detail.require_entrance_exam(required=False)
self.settings_detail.save_changes()
course_outline_page.visit()
self.assertFalse(element_has_text(
page=course_outline_page,
css_selector='span.section-title',
text='Entrance Exam'
))
def test_entrance_exam_has_unit_button(self):
"""
Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
And user has option to add units only instead of any Subsection.
"""
self.settings_detail.require_entrance_exam(required=True)
self.settings_detail.save_changes()
# getting the course outline page.
course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
course_outline_page.visit()
course_outline_page.wait_for_ajax()
# button with text 'New Unit' should be present.
self.assertTrue(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Unit'
))
# button with text 'New Subsection' should not be present.
self.assertFalse(element_has_text(
page=course_outline_page,
css_selector='.add-item a.button-new',
text='New Subsection'
))
| agpl-3.0 | -5,740,725,429,276,370,000 | 37.928205 | 113 | 0.642735 | false |
torresj/cafe | contacto.py | 2 | 7363 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 3 13:07:55 2013
@author: jaime
"""
import web
from web.contrib.template import render_mako
from web import form
import pymongo
import feedparser
import time
from keys import *
import tweepy
render = render_mako(
directories=['plantillas'],
input_encoding='utf-8',
output_encoding='utf-8',
)
'''
This function updates the timestamp of the last RSS access, if
necessary. It checks whether more than 10 minutes have passed since
the last fetch and, if so, downloads the RSS feed again.
'''
def actualiza_tiempo():
conn=pymongo.MongoClient()
db=conn.mydb
cache=db.cache
tiempo1=time.time()
t=cache.find_one({"rss":"el pais"})
tiempo2=t[u'ult_act']
    if (tiempo1 - tiempo2) > 600:  # more than 10 minutes since the last update
        global rss
        cache.update({"rss": "el pais"}, {"$set": {"ult_act": time.time()}})
        rss = feedparser.parse('http://ep00.epimg.net/rss/tags/ultimas_noticias.xml')
conn.close()
def actualiza_tweet():
conn=pymongo.MongoClient()
db=conn.mydb
cache=db.cache
tiempo1=time.time()
t=cache.find_one({"rss":"tweet"})
tiempo2=t[u'ult_act']
    if (tiempo1 - tiempo2) > 600:  # more than 10 minutes since the last update
        global tweet
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
result = api.user_timeline("torresjTIC")
tweet=[]
for status in result:
geo=status.geo
if geo!=None:
tweet.append([status.text,[geo[u'coordinates'][0],geo[u'coordinates'][1]]])
cache.update({"rss": "tweet"}, {"$set": {"ult_act": time.time()}})
conn.close()
#RSS variable; we also store the moment the RSS feed was downloaded
rss=feedparser.parse('http://ep00.epimg.net/rss/tags/ultimas_noticias.xml')
actualiza_tiempo()
#Connect to Twitter to fetch the tweets
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
result = api.user_timeline("torresjTIC")
tweet=[]
for status in result:
geo=status.geo
if geo!=None:
print status.text
tweet.append([status.text,[geo[u'coordinates'][0],geo[u'coordinates'][1]]])
actualiza_tweet()
# helper functions used as lists of days, months and years
def dias():
x=[];
for n in range(1,32):
x.append(n)
return x
def meses():
x=[];
for n in range(1,13):
x.append(n)
return x
def anios():
x=[];
for n in range(1940,2014):
x.append(n)
return x
meses31=['1','3','4','7','8','10','12']
meses30=['5','6','9','11']
#Validators
vpass=form.regexp(r'.{7,20}$',"La contrasenia debe tener mas de 7 caracteres")
vemail=form.regexp(r'\b[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}\b',"Introduzca una direccion de correo valida")
#Login form
formul = form.Form(
form.Textbox("user",form.notnull,description = "Usuario:"),
form.Password("password",form.notnull,vpass,description = "Contraseña:"),
form.Button("Login")
)
class Contacto:
def GET(self):
s=web.ctx.session
try:
if s.usuario!='':
log=True
user=s.usuario
else:
log=False
user=''
except AttributeError:
s.usuario=''
log=False
user=''
        #Variables used to fill in the web page
login=formul()
registro=0
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Localizacion"
cuerpo="Cuerpoooooooooooooooooooooo"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
modo="contacto"
error=''
actualiza_tiempo()
actualiza_tweet()
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss,
tweet=tweet)
def POST(self):
login=formul()
registro=0
titulo="CAFE DEL MAR"
subtitulo1="Oferta de cafes"
cafes=[["Cafe1","Descripcion del cafe 1"],["Cafe2","Descripcion del cafe 2"],["Cafe3","Descripcion del cafe 3"],["Cafe4","Descripcion del cafe 4"]]
cafeEspecial=["Cafe especial de la casa","Descripcion cafe especial de la casa"]
piepagina="Copyright © 2013 Jaime Torres Benavente"
subtitulo2="Localizacion"
cuerpo="Cuerpo00oooooo"
subtitulo3=""
subtitulo4=""
servicios=[]
reg=False
modo="contacto"
error=''
actualiza_tiempo()
actualiza_tweet()
if not login.validates():
log=False
user=''
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss,
tweet=tweet)
else:
s=web.ctx.session
            #look up the user in the database
conn=pymongo.MongoClient()
db=conn.mydb
usuarios=db.usuarios
us=usuarios.find_one({"user":login['user'].value})
conn.close()
try:
if login['password'].value==us[u'pass']:
log=True
user=login['user'].value
s.usuario=user
else:
log=False
user=''
error='contrasña erronea'
except TypeError:
log=False;
user=''
error='El usuario no existe'
return render.plantilla(
titulo=titulo,
login=login,
log=log,
user=user,
subtitulo1=subtitulo1,
cafes=cafes,
cafeEspecial=cafeEspecial,
subtitulo2=subtitulo2,
cuerpo=cuerpo,
registro=registro,
subtitulo3=subtitulo3,
subtitulo4=subtitulo4,
servicios=servicios,
piepagina=piepagina,
reg=reg,
modo=modo,
error=error,
rss=rss,
tweet=tweet) | gpl-2.0 | 1,535,667,005,186,142,700 | 27.307692 | 155 | 0.555374 | false |
hynekcer/django | tests/foreign_object/test_empty_join.py | 232 | 1498 | from django.test import TestCase
from .models import SlugPage
class RestrictedConditionsTests(TestCase):
def setUp(self):
slugs = [
'a',
'a/a',
'a/b',
'a/b/a',
'x',
'x/y/z',
]
SlugPage.objects.bulk_create([SlugPage(slug=slug) for slug in slugs])
def test_restrictions_with_no_joining_columns(self):
"""
Test that it's possible to create a working related field that doesn't
use any joining columns, as long as an extra restriction is supplied.
"""
a = SlugPage.objects.get(slug='a')
self.assertListEqual(
[p.slug for p in SlugPage.objects.filter(ascendants=a)],
['a', 'a/a', 'a/b', 'a/b/a'],
)
self.assertEqual(
[p.slug for p in a.descendants.all()],
['a', 'a/a', 'a/b', 'a/b/a'],
)
aba = SlugPage.objects.get(slug='a/b/a')
self.assertListEqual(
[p.slug for p in SlugPage.objects.filter(descendants__in=[aba])],
['a', 'a/b', 'a/b/a'],
)
self.assertListEqual(
[p.slug for p in aba.ascendants.all()],
['a', 'a/b', 'a/b/a'],
)
def test_empty_join_conditions(self):
x = SlugPage.objects.get(slug='x')
message = "Join generated an empty ON clause."
with self.assertRaisesMessage(ValueError, message):
list(SlugPage.objects.filter(containers=x))
| bsd-3-clause | 1,918,732,513,960,612,900 | 30.87234 | 78 | 0.530708 | false |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/fields.py | 1 | 5990 | # urllib3/fields.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
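# Hedged usage sketch (not part of the original source): plain ASCII values
# are emitted as a quoted pair, non-ASCII values fall back to RFC 2231:
#
#   >>> format_header_param('filename', 'report.txt')
#   'filename="report.txt"'
#   >>> format_header_param('filename', u'r\xe9sum\xe9.txt')
#   "filename*=utf-8''r%C3%A9sum%C3%A9.txt"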
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from parameter
of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
tuple where the MIME type is optional. For example: ::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
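    # Hedged usage sketch (not part of the original source): the legacy tuple
    # shapes normalise into a multipart-ready RequestField:
    #
    #   f = RequestField.from_tuples('realfile', ('barfile.txt', 'contents'))
    #   f.headers['Content-Type']   # 'text/plain', guessed from the filename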
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as
`k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join(
['', self._render_parts((('name', self._name), ('filename', self._filename)))])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
| mit | -6,864,710,002,359,419,000 | 32.463687 | 97 | 0.599165 | false |
malja/cvut-python | cviceni07/frequency.py | 1 | 1393 | import sys
from collections import Counter
def loadFile( filename ):
words = []
with open(filename) as file:
for line in file:
            # Add a trailing space so the last word on the line is flushed below
line += " "
word = ""
for char in line:
if not char.isalpha():
if len(word) > 0:
words.append( word.lower() )
word = ""
continue
word += char
return words
def countWordFrequencies( words ):
words_sorted = sorted( words )
unique_words = sorted( set( words_sorted ) )
frequencies = []
for unique in unique_words:
frequencies.append(0)
while True:
if words_sorted[0] == unique:
frequencies[-1] += 1
words_sorted.pop(0)
if len( words_sorted ) == 0:
break
else:
break
return frequencies, unique_words
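# Editor's addition: an equivalent, simpler implementation built on the
# already-imported collections.Counter. Shown purely as an illustrative
# alternative to countWordFrequencies(); the script below does not use it.
def countWordFrequenciesWithCounter(words):
    counts = Counter(words)
    unique_words = sorted(counts)
    return [counts[word] for word in unique_words], unique_words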
def printOutput( frequencies, words ):
max_frequency = max( frequencies )
for i in range( len( words ) ):
print( "{:>14}:".format( words[i] ), "*" * int( ( 50* frequencies[i] ) / max_frequency ), sep="" )
array = loadFile( sys.argv[1] )
if ( len(array) == 0 ):
exit()
freq, uniq = countWordFrequencies( array )
printOutput( freq, uniq ) | mit | 2,125,759,338,699,850,800 | 21.095238 | 106 | 0.485981 | false |
erikdejonge/newsrivr | daemons/d_checkdoubleusernames.py | 1 | 3053 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from d_utils import *
def clog(s):
    s = str(s)
    print('\033[96m'+strftime("%Y-%m-%d %H:%M:%S", gmtime())+": "+s+'\033[0m')
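# Editor's note: clog() wraps its message in ANSI bright-cyan escapes, e.g.
#   clog("starting")  ->  "\033[96m2014-01-01 12:00:00: starting\033[0m"
# (the timestamp shown is made up for illustration).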
def checkDoubleUsernames(exis_user_screen_name):
users_same_name = []
    # Find out if there are more users with the same screen name; this can happen after a deny.
crs = getCollUsers().find({"screen_name":exis_user_screen_name}, sort=[("date_created", 1)])
for i in crs:
users_same_name.append(i)
    # TODO: check this against d_sitenewusers
old_newsrivr_userid_md5 = None
new_newsrivr_userid_md5 = None
if len(users_same_name)>1:
old_newsrivr_userid_md5 = users_same_name[0]["newsrivr_userid_md5"]
new_newsrivr_userid_md5 = users_same_name[len(users_same_name)-1]["newsrivr_userid_md5"]
if "closed_drops" in users_same_name[0]:
users_same_name[len(users_same_name)-1]["closed_drops"] = users_same_name[0]["closed_drops"]
if "share_data" in users_same_name[0]:
users_same_name[len(users_same_name)-1]["share_data"] = users_same_name[0]["share_data"]
getCollUsers().save(users_same_name[len(users_same_name)-1], safe=True)
else:
return
if old_newsrivr_userid_md5 and new_newsrivr_userid_md5:
cnt = 0
for d in getCollDrops().find({"newsrivr_userid_md5":old_newsrivr_userid_md5}):
d["newsrivr_userid_md5"] = list(set(d["newsrivr_userid_md5"]))
d["newsrivr_userid_md5"].remove(old_newsrivr_userid_md5)
d["newsrivr_userid_md5"].append(new_newsrivr_userid_md5)
for i in getCollDrops().find({"id_str":d["id_str"]}):
if i["_id"]!=d["_id"]:
d["newsrivr_userid_md5"].extend(i["newsrivr_userid_md5"])
d["newsrivr_userid_md5"] = list(set(d["newsrivr_userid_md5"]))
getCollDrops().remove({"_id":pymongo.objectid.ObjectId(i["_id"])}, safe=True)
getCollDrops().save(d, safe=True)
cnt += 1
if cnt%100==0:
clog("user changed md5, correcting: "+ str(cnt))
for u in users_same_name:
if u["newsrivr_userid_md5"]!=new_newsrivr_userid_md5:
getCollUsers().remove({"_id":pymongo.objectid.ObjectId(u["_id"])}, safe=True)
drops_to_remove = []
for d in getCollDrops().find({"newsrivr_userid_md5":u["newsrivr_userid_md5"]}):
d["newsrivr_userid_md5"] = list(set(d["newsrivr_userid_md5"]))
d["newsrivr_userid_md5"].remove(u["newsrivr_userid_md5"])
if len(d["newsrivr_userid_md5"])==0:
drops_to_remove.append(d["id_str"])
else:
if getCollDrops().find(d).count()>0:
getCollDrops().remove(d, safe=True)
else:
getCollDrops().save(d, safe=True)
deleteDrops(drops_to_remove)
def main():
while True:
for u in getCollUsers().find():
if "screen_name" in u:
checkDoubleUsernames(u["screen_name"])
time.sleep(20)
if __name__=="__main__":
clog("check if double names exists")
driver(main, inspect.getfile(inspect.currentframe()))
| gpl-2.0 | -375,972,500,351,243,840 | 38.141026 | 95 | 0.666885 | false |
nikoonia/gem5v | configs/common/Benchmarks.py | 36 | 6206 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from SysPaths import script, disk, binary
from os import environ as env
from m5.defines import buildEnv
class SysConfig:
def __init__(self, script=None, mem=None, disk=None):
self.scriptname = script
self.diskname = disk
self.memsize = mem
def script(self):
if self.scriptname:
return script(self.scriptname)
else:
return ''
def mem(self):
if self.memsize:
return self.memsize
else:
return '128MB'
def disk(self):
if self.diskname:
return disk(self.diskname)
elif buildEnv['TARGET_ISA'] == 'alpha':
return env.get('LINUX_IMAGE', disk('linux-latest.img'))
elif buildEnv['TARGET_ISA'] == 'x86':
return env.get('LINUX_IMAGE', disk('x86root.img'))
elif buildEnv['TARGET_ISA'] == 'arm':
return env.get('LINUX_IMAGE', disk('linux-arm-ael.img'))
else:
print "Don't know what default disk image to use for %s ISA" % \
buildEnv['TARGET_ISA']
exit(1)
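# Editor's note: illustrative only -- how SysConfig falls back to defaults
# (the rcS script and size below come from the table that follows):
#
#   SysConfig().mem()                          # -> '128MB' (default)
#   SysConfig('netperf-server.rcS', '512MB')   # -> uses the given script/size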
# Benchmarks are defined as a key in a dict which is a list of SysConfigs
# The first defined machine is the test system, the others are driving systems
Benchmarks = {
'PovrayBench': [SysConfig('povray-bench.rcS', '512MB', 'povray.img')],
'PovrayAutumn': [SysConfig('povray-autumn.rcS', '512MB', 'povray.img')],
'NetperfStream': [SysConfig('netperf-stream-client.rcS'),
SysConfig('netperf-server.rcS')],
'NetperfStreamUdp': [SysConfig('netperf-stream-udp-client.rcS'),
SysConfig('netperf-server.rcS')],
'NetperfUdpLocal': [SysConfig('netperf-stream-udp-local.rcS')],
'NetperfStreamNT': [SysConfig('netperf-stream-nt-client.rcS'),
SysConfig('netperf-server.rcS')],
'NetperfMaerts': [SysConfig('netperf-maerts-client.rcS'),
SysConfig('netperf-server.rcS')],
'SurgeStandard': [SysConfig('surge-server.rcS', '512MB'),
SysConfig('surge-client.rcS', '256MB')],
'SurgeSpecweb': [SysConfig('spec-surge-server.rcS', '512MB'),
SysConfig('spec-surge-client.rcS', '256MB')],
'Nhfsstone': [SysConfig('nfs-server-nhfsstone.rcS', '512MB'),
SysConfig('nfs-client-nhfsstone.rcS')],
'Nfs': [SysConfig('nfs-server.rcS', '900MB'),
SysConfig('nfs-client-dbench.rcS')],
'NfsTcp': [SysConfig('nfs-server.rcS', '900MB'),
SysConfig('nfs-client-tcp.rcS')],
'IScsiInitiator': [SysConfig('iscsi-client.rcS', '512MB'),
SysConfig('iscsi-server.rcS', '512MB')],
'IScsiTarget': [SysConfig('iscsi-server.rcS', '512MB'),
SysConfig('iscsi-client.rcS', '512MB')],
'Validation': [SysConfig('iscsi-server.rcS', '512MB'),
SysConfig('iscsi-client.rcS', '512MB')],
'Ping': [SysConfig('ping-server.rcS',),
SysConfig('ping-client.rcS')],
'ValAccDelay': [SysConfig('devtime.rcS', '512MB')],
'ValAccDelay2': [SysConfig('devtimewmr.rcS', '512MB')],
'ValMemLat': [SysConfig('micro_memlat.rcS', '512MB')],
'ValMemLat2MB': [SysConfig('micro_memlat2mb.rcS', '512MB')],
'ValMemLat8MB': [SysConfig('micro_memlat8mb.rcS', '512MB')],
    'ValMemLat8': [SysConfig('micro_memlat8.rcS', '512MB')],
'ValTlbLat': [SysConfig('micro_tlblat.rcS', '512MB')],
'ValSysLat': [SysConfig('micro_syscall.rcS', '512MB')],
'ValCtxLat': [SysConfig('micro_ctx.rcS', '512MB')],
'ValStream': [SysConfig('micro_stream.rcS', '512MB')],
'ValStreamScale': [SysConfig('micro_streamscale.rcS', '512MB')],
'ValStreamCopy': [SysConfig('micro_streamcopy.rcS', '512MB')],
'MutexTest': [SysConfig('mutex-test.rcS', '128MB')],
'ArmAndroid-GB': [SysConfig('null.rcS', '256MB',
'ARMv7a-Gingerbread-Android.SMP.mouse.nolock.clean.img')],
'bbench-gb': [SysConfig('bbench-gb.rcS', '256MB',
'ARMv7a-Gingerbread-Android.SMP.mouse.nolock.img')],
'ArmAndroid-ICS': [SysConfig('null.rcS', '256MB',
'ARMv7a-ICS-Android.SMP.nolock.clean.img')],
'bbench-ics': [SysConfig('bbench-ics.rcS', '256MB',
'ARMv7a-ICS-Android.SMP.nolock.img')]
}
benchs = Benchmarks.keys()
benchs.sort()
DefinedBenchmarks = ", ".join(benchs)
| bsd-3-clause | -5,316,038,759,171,885,000 | 48.253968 | 78 | 0.621495 | false |
fjbatresv/odoo | addons/website_sale_delivery/controllers/main.py | 124 | 1551 | # -*- coding: utf-8 -*-
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.website_sale.controllers.main
class website_sale(openerp.addons.website_sale.controllers.main.website_sale):
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.website.sale_get_order(context=context)
carrier_id = post.get('carrier_id')
if carrier_id:
carrier_id = int(carrier_id)
if order:
request.registry['sale.order']._check_carrier_quotation(cr, uid, order, force_carrier_id=carrier_id, context=context)
if carrier_id:
return request.redirect("/shop/payment")
res = super(website_sale, self).payment(**post)
return res
def order_lines_2_google_api(self, order_lines):
""" Transforms a list of order lines into a dict for google analytics """
order_lines_not_delivery = [line for line in order_lines if not line.is_delivery]
return super(website_sale, self).order_lines_2_google_api(order_lines_not_delivery)
def order_2_return_dict(self, order):
""" Returns the tracking_cart dict of the order for Google analytics """
ret = super(website_sale, self).order_2_return_dict(order)
for line in order.order_line:
if line.is_delivery:
ret['transaction']['shipping'] = line.price_unit
return ret
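    # Editor's note: illustrative result shape only -- when the order carries a
    # delivery line, the analytics dict gains a 'shipping' entry, e.g.
    # (amount made up): ret['transaction']['shipping'] == 10.0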
| agpl-3.0 | 837,645,498,407,480,600 | 42.083333 | 129 | 0.652482 | false |
bcarroll/authmgr | python-3.6.2-Win64/Lib/site-packages/werkzeug/contrib/securecookie.py | 91 | 12174 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
"""Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
"""Represents a secure cookie. You can subclass this class and provide
    an alternative mac method. The important thing is that the mac method
    is a function with a similar interface to hashlib's. Required
    methods are update() and digest().
Example usage:
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
>>> x["foo"]
42
>>> x["baz"]
(1, 2, 3)
>>> x["blafasel"] = 23
>>> x.should_save
True
:param data: the initial data. Either a dict, list of tuples or `None`.
:param secret_key: the secret key. If not set `None` or not specified
it has to be set before :meth:`serialize` is called.
:param new: The initial value of the `new` flag.
"""
#: The hash method to use. This has to be a module with a new function
#: or a function that creates a hashlib object. Such as `hashlib.md5`
#: Subclasses can override this attribute. The default hash is sha1.
#: Make sure to wrap this in staticmethod() if you store an arbitrary
#: function there such as hashlib.sha1 which might be implemented
#: as a function.
hash_method = staticmethod(_default_hash)
    #: the module used for serialization. Unless overridden by subclasses
#: the standard pickle module is used.
serialization_method = pickle
#: if the contents should be base64 quoted. This can be disabled if the
#: serialization process returns cookie safe strings only.
quote_base64 = True
def __init__(self, data=None, secret_key=None, new=True):
ModificationTrackingDict.__init__(self, data or ())
# explicitly convert it into a bytestring because python 2.6
# no longer performs an implicit string conversion on hmac
if secret_key is not None:
secret_key = bytes(secret_key)
self.secret_key = secret_key
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved. By default this is only true
for :attr:`modified` cookies, not :attr:`new`.
"""
return self.modified
@classmethod
def quote(cls, value):
"""Quote the value for the cookie. This can be any object supported
by :attr:`serialization_method`.
:param value: the value to quote.
"""
if cls.serialization_method is not None:
value = cls.serialization_method.dumps(value)
if cls.quote_base64:
value = b''.join(base64.b64encode(value).splitlines()).strip()
return value
@classmethod
def unquote(cls, value):
"""Unquote the value for the cookie. If unquoting does not work a
:exc:`UnquoteError` is raised.
:param value: the value to unquote.
"""
try:
if cls.quote_base64:
value = base64.b64decode(value)
if cls.serialization_method is not None:
value = cls.serialization_method.loads(value)
return value
except Exception:
            # unfortunately pickle and other serialization modules can
            # raise pretty much any error here. if we get one we catch it
            # and convert it into an UnquoteError
raise UnquoteError()
def serialize(self, expires=None):
"""Serialize the secure cookie into a string.
If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it. This provides better
protection against session cookie theft.
:param expires: an optional expiration date for the cookie (a
:class:`datetime.datetime` object)
"""
if self.secret_key is None:
raise RuntimeError('no secret key defined')
if expires:
self['_expires'] = _date_to_unix(expires)
result = []
mac = hmac(self.secret_key, None, self.hash_method)
for key, value in sorted(self.items()):
result.append(('%s=%s' % (
url_quote_plus(key),
self.quote(value).decode('ascii')
)).encode('ascii'))
mac.update(b'|' + result[-1])
return b'?'.join([
base64.b64encode(mac.digest()).strip(),
b'&'.join(result)
])
@classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode('utf-8', 'replace')
if isinstance(secret_key, text_type):
secret_key = secret_key.encode('utf-8', 'replace')
try:
base64_hash, data = string.split(b'?', 1)
except (ValueError, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b'&'):
mac.update(b'|' + item)
if b'=' not in item:
items = None
break
key, value = item.split(b'=', 1)
# try to make the key a string
key = url_unquote_plus(key.decode('ascii'))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
        # securely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if '_expires' in items:
if time() > items['_expires']:
items = ()
else:
del items['_expires']
else:
items = ()
return cls(items, secret_key, False)
@classmethod
def load_cookie(cls, request, key='session', secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
        cookie is not set, a new :class:`SecureCookie` instance is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
def save_cookie(self, response, key='session', expires=None,
session_expires=None, max_age=None, path='/', domain=None,
secure=None, httponly=False, force=False):
"""Saves the SecureCookie in a cookie on response object. All
parameters that are not described here are forwarded directly
to :meth:`~BaseResponse.set_cookie`.
:param response: a response object that has a
:meth:`~BaseResponse.set_cookie` method.
:param key: the name of the cookie.
:param session_expires: the expiration date of the secure cookie
stored information. If this is not provided
the cookie `expires` date is used instead.
"""
if force or self.should_save:
data = self.serialize(session_expires or expires)
response.set_cookie(key, data, expires=expires, max_age=max_age,
path=path, domain=domain, secure=secure,
httponly=httponly)
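# ---------------------------------------------------------------------------
# Editor's addition: an illustrative subclass (not part of Werkzeug itself)
# exercising the documented serialization_method extension point above --
# swapping the pickle serializer for JSON, which avoids unpickling untrusted
# data. The wrapper adapts json's text interface to the bytes that
# quote()/unquote() expect.
import json
class _JSONModule(object):
    @staticmethod
    def dumps(value):
        return json.dumps(value).encode('utf-8')
    @staticmethod
    def loads(value):
        return json.loads(value.decode('utf-8'))
class JSONSecureCookie(SecureCookie):
    serialization_method = _JSONModule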
| bsd-3-clause | 3,238,733,264,377,720,300 | 36.690402 | 79 | 0.587482 | false |
jeremiahmarks/Todo.txt-python | tests/base.py | 2 | 3810 | # TODO.TXT-CLI-python test script
# Copyright (C) 2011-2012 Sigmavirus24, Jeff Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# TLDR: This is licensed under the GPLv3. See LICENSE for more details.
# Common functions for test_*_todo.py
import datetime
import os
import re
import sys
import unittest
sys.path.insert(0, '..')
import todo
todotxt = todo.CONFIG["TODO_FILE"] = "test_todo.txt"
donetxt = todo.CONFIG["DONE_FILE"] = "test_done.txt"
class BaseTest(unittest.TestCase):
num = 50
def default_config(self):
pass
def setUp(self):
todo.CONFIG["PRE_DATE"] = False
todo.CONFIG["TODO_PY"] = "testing"
todo.default_config = self.default_config
sys.stdout = open(os.devnull, 'w')
open(todotxt, "w+").close()
open(donetxt, "w+").close()
def tearDown(self):
sys.stdout = sys.__stdout__
if os.path.isfile(todotxt):
os.unlink(todotxt)
if os.path.isfile(donetxt):
os.unlink(donetxt)
def count_matches(self, regexp=None):
count = 0
for line in todo.iter_todos():
if regexp == None or re.match(regexp, line):
count += 1
return count
def _test_lines_no_pri(self, num):
return ["Test {0}".format(i) for i in range(0, num)]
def _test_lines_pri(self, num):
n = len(todo.PRIORITIES)
p = todo.PRIORITIES
return ["({0}) Test {1}".format(p[i % n], i) for i in range(0, num)]
def _test_lines_date(self, num):
l = self._test_lines_pri(num)
m = []
start_date = datetime.date.today()
for d, l in zip((start_date + datetime.timedelta(n) for n in range(num)), l):
m.append(todo.concat([l, " #{%s}" % d.isoformat()]))
return m
def _test_lines_project(self, num):
projects = ["+foo", "+bar", "+bogus", "+github", "+school", "+work",
"+inthemorning", "+agenda", "+noagenda"]
n = len(projects)
l = self._test_lines_pri(num)
m = []
for i in range(0, num):
m.append(todo.concat([l[i], projects[i % n]], " "))
return m
def _test_lines_context(self, num):
projects = ["@foo", "@bar", "@bogus", "@github", "@school", "@work",
"@inthemorning", "@agenda", "@noagenda"]
n = len(projects)
l = self._test_lines_pri(num)
m = []
for i in range(0, num):
m.append(todo.concat([l[i], projects[i % n]], " "))
return m
def assertNumLines(self, exp, regexp=None):
c = self.count_matches(regexp)
self.assertEqual(exp, c)
def assertIsInstance(self, obj, cls, msg=None):
if sys.version_info >= (2, 7):
super(BaseTest, self).assertIsInstance(obj, cls, msg)
else:
self.assertTrue(isinstance(obj, cls))
def assertIsNotNone(self, expr, msg=None):
if sys.version_info >= (2, 7):
super(BaseTest, self).assertIsNotNone(expr, msg)
else:
if not expr:
self.fail(msg)
def force_print(self, message):
sys.stderr.write(''.join([message, '\n']))
sys.stderr.flush()
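# Editor's addition: an illustrative concrete test case built only on the
# helpers defined above; the regexp relies on _test_lines_pri's
# "(<priority>) Test <n>" format and assumes nothing else about todo.py.
class ExampleLineHelperTest(BaseTest):
    def test_priority_line_format(self):
        for line in self._test_lines_pri(self.num):
            self.assertIsNotNone(re.match(r'^\(.+\) Test \d+$', line))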
| gpl-3.0 | -3,524,293,289,227,194,400 | 29 | 85 | 0.585827 | false |
Kha/flask-admin | examples/layout_bootstrap3/app.py | 43 | 6109 | import os
import os.path as op
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import flask_admin as admin
from flask_admin.contrib.sqla import ModelView
# Create application
app = Flask(__name__)
# Create a dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create a file-backed SQLite database
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Models
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64))
email = db.Column(db.Unicode(64))
def __unicode__(self):
return self.name
class Page(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Unicode(64))
content = db.Column(db.UnicodeText)
def __unicode__(self):
        return self.title
# Customized admin interface
class CustomView(ModelView):
list_template = 'list.html'
create_template = 'create.html'
edit_template = 'edit.html'
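# Editor's note: the template names above resolve against this example's own
# templates/ directory, which is assumed to extend the flask-admin base
# templates (the standard layout for template-override examples).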
class UserAdmin(CustomView):
column_searchable_list = ('name',)
column_filters = ('name', 'email')
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
# Create admin with custom base template
admin = admin.Admin(app, 'Example: Layout-BS3', base_template='layout.html', template_mode='bootstrap3')
# Add views
admin.add_view(UserAdmin(User, db.session))
admin.add_view(CustomView(Page, db.session))
def build_sample_db():
"""
Populate a small db with some example entries.
"""
db.drop_all()
db.create_all()
first_names = [
'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia',
'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
]
last_names = [
'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',
'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',
'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'
]
for i in range(len(first_names)):
user = User()
user.name = first_names[i] + " " + last_names[i]
user.email = first_names[i].lower() + "@example.com"
db.session.add(user)
sample_text = [
{
'title': "de Finibus Bonorum et Malorum - Part I",
'content': "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \
mollit anim id est laborum."
},
{
'title': "de Finibus Bonorum et Malorum - Part II",
'content': "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque \
laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto \
beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur \
aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi \
nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, \
adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam \
aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam \
corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum \
iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum \
qui dolorem eum fugiat quo voluptas nulla pariatur?"
},
{
'title': "de Finibus Bonorum et Malorum - Part III",
'content': "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium \
voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati \
cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id \
est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam \
libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod \
maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. \
Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet \
ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur \
a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis \
doloribus asperiores repellat."
}
]
for entry in sample_text:
page = Page()
page.title = entry['title']
page.content = entry['content']
db.session.add(page)
db.session.commit()
return
if __name__ == '__main__':
# Build a sample db on the fly, if one does not exist yet.
app_dir = op.realpath(os.path.dirname(__file__))
database_path = op.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
build_sample_db()
# Start app
app.run(debug=True)
| bsd-3-clause | -3,639,933,405,811,045,000 | 40 | 120 | 0.625143 | false |
testmana2/test | Helpviewer/HelpSearchWidget.py | 2 | 4654 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a window for showing the QtHelp index.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSignal, Qt, QEvent, QUrl
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QTextBrowser, QApplication, \
QMenu
class HelpSearchWidget(QWidget):
"""
Class implementing a window for showing the QtHelp index.
@signal linkActivated(QUrl) emitted when a search result entry is activated
@signal escapePressed() emitted when the ESC key was pressed
"""
linkActivated = pyqtSignal(QUrl)
escapePressed = pyqtSignal()
def __init__(self, engine, mainWindow, parent=None):
"""
Constructor
@param engine reference to the help search engine (QHelpSearchEngine)
@param mainWindow reference to the main window object (QMainWindow)
@param parent reference to the parent widget (QWidget)
"""
super(HelpSearchWidget, self).__init__(parent)
self.__engine = engine
self.__mw = mainWindow
self.__layout = QVBoxLayout(self)
self.__result = self.__engine.resultWidget()
self.__query = self.__engine.queryWidget()
self.__layout.addWidget(self.__query)
self.__layout.addWidget(self.__result)
self.setFocusProxy(self.__query)
self.__query.search.connect(self.__search)
self.__result.requestShowLink.connect(self.linkActivated)
self.__engine.searchingStarted.connect(self.__searchingStarted)
self.__engine.searchingFinished.connect(self.__searchingFinished)
self.__browser = self.__result.findChildren(QTextBrowser)[0]
if self.__browser:
self.__browser.viewport().installEventFilter(self)
def __search(self):
"""
Private slot to perform a search of the database.
"""
query = self.__query.query()
self.__engine.search(query)
def __searchingStarted(self):
"""
Private slot to handle the start of a search.
"""
QApplication.setOverrideCursor(Qt.WaitCursor)
def __searchingFinished(self, hits):
"""
Private slot to handle the end of the search.
@param hits number of hits (integer) (unused)
"""
QApplication.restoreOverrideCursor()
def eventFilter(self, watched, event):
"""
Public method called to filter the event queue.
@param watched the QObject being watched (QObject)
@param event the event that occurred (QEvent)
@return flag indicating whether the event was handled (boolean)
"""
if self.__browser and watched == self.__browser.viewport() and \
event.type() == QEvent.MouseButtonRelease:
link = self.__result.linkAt(event.pos())
if not link.isEmpty() and link.isValid():
ctrl = event.modifiers() & Qt.ControlModifier
if (event.button() == Qt.LeftButton and ctrl) or \
event.button() == Qt.MidButton:
self.__mw.newTab(link)
return QWidget.eventFilter(self, watched, event)
def keyPressEvent(self, evt):
"""
Protected method handling key press events.
@param evt reference to the key press event (QKeyEvent)
"""
if evt.key() == Qt.Key_Escape:
self.escapePressed.emit()
else:
evt.ignore()
def contextMenuEvent(self, evt):
"""
Protected method handling context menu events.
@param evt reference to the context menu event (QContextMenuEvent)
"""
point = evt.globalPos()
if self.__browser:
point = self.__browser.mapFromGlobal(point)
if not self.__browser.rect().contains(point, True):
return
link = QUrl(self.__browser.anchorAt(point))
else:
point = self.__result.mapFromGlobal(point)
link = self.__result.linkAt(point)
if link.isEmpty() or not link.isValid():
return
menu = QMenu()
curTab = menu.addAction(self.tr("Open Link"))
newTab = menu.addAction(self.tr("Open Link in New Tab"))
menu.move(evt.globalPos())
act = menu.exec_()
if act == curTab:
self.linkActivated.emit(link)
elif act == newTab:
self.__mw.newTab(link)
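# Editor's note: illustrative wiring only -- engine and main-window creation
# are application-specific and happen outside this file:
#
#     engine = QHelpEngine(collection_file).searchEngine()
#     widget = HelpSearchWidget(engine, main_window)
#     widget.linkActivated.connect(main_window.openUrl)  # hypothetical slot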
| gpl-3.0 | 3,148,641,403,850,601,000 | 32.482014 | 79 | 0.585733 | false |
Judystudy/gooderp_addons | sell/report/sell_summary_goods.py | 6 | 6707 | # -*- coding: utf-8 -*-
from odoo import fields, models, api
import odoo.addons.decimal_precision as dp
import datetime
class SellSummaryGoods(models.Model):
_name = 'sell.summary.goods'
_inherit = 'report.base'
    _description = u'Sales Summary (by Product)'
    id_lists = fields.Text(u'Move line ID list')
    goods_categ = fields.Char(u'Product Category')
    goods_code = fields.Char(u'Product Code')
    goods = fields.Char(u'Product Name')
    attribute = fields.Char(u'Attribute')
    warehouse = fields.Char(u'Warehouse')
    qty_uos = fields.Float(u'Secondary Qty', digits=dp.get_precision('Quantity'))
    uos = fields.Char(u'Secondary Unit')
    qty = fields.Float(u'Base Qty', digits=dp.get_precision('Quantity'))
    uom = fields.Char(u'Base Unit')
    price = fields.Float(u'Unit Price', digits=dp.get_precision('Price'))
    amount = fields.Float(u'Sales Revenue', digits=dp.get_precision('Amount'))
    tax_amount = fields.Float(u'Tax Amount', digits=dp.get_precision('Amount'))
    subtotal = fields.Float(u'Total (Tax Incl.)', digits=dp.get_precision('Amount'))
    margin = fields.Float(u'Gross Margin', digits=dp.get_precision('Amount'))
def select_sql(self, sql_type='out'):
return '''
SELECT MIN(wml.id) as id,
array_agg(wml.id) AS id_lists,
categ.name AS goods_categ,
goods.code AS goods_code,
goods.name AS goods,
attr.name AS attribute,
wh.name AS warehouse,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_uos_qty
ELSE - wml.goods_uos_qty END) AS qty_uos,
uos.name AS uos,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END) AS qty,
uom.name AS uom,
(CASE WHEN SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END) = 0 THEN 0
ELSE
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.amount
ELSE - wml.amount END)
/ SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END)
END) AS price,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.amount
ELSE - wml.amount END) AS amount,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.tax_amount
ELSE - wml.tax_amount END) AS tax_amount,
SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.subtotal
ELSE - wml.subtotal END) AS subtotal,
(SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.amount
ELSE - wml.amount END) - SUM(CASE WHEN wm.origin = 'sell.delivery.sell' THEN wml.goods_qty
ELSE - wml.goods_qty END) * wml.cost_unit) AS margin
'''
def from_sql(self, sql_type='out'):
return '''
FROM wh_move_line AS wml
LEFT JOIN wh_move wm ON wml.move_id = wm.id
LEFT JOIN partner ON wm.partner_id = partner.id
LEFT JOIN goods ON wml.goods_id = goods.id
LEFT JOIN core_category AS categ ON goods.category_id = categ.id
LEFT JOIN attribute AS attr ON wml.attribute_id = attr.id
LEFT JOIN warehouse AS wh ON wml.warehouse_id = wh.id
OR wml.warehouse_dest_id = wh.id
LEFT JOIN uom AS uos ON goods.uos_id = uos.id
LEFT JOIN uom ON goods.uom_id = uom.id
'''
def where_sql(self, sql_type='out'):
extra = ''
if self.env.context.get('partner_id'):
extra += 'AND partner.id = {partner_id}'
if self.env.context.get('goods_id'):
extra += 'AND goods.id = {goods_id}'
if self.env.context.get('goods_categ_id'):
extra += 'AND categ.id = {goods_categ_id}'
if self.env.context.get('warehouse_id'):
extra += 'AND wh.id = {warehouse_id}'
return '''
WHERE wml.state = 'done'
AND wml.date >= '{date_start}'
AND wml.date < '{date_end}'
AND wm.origin like 'sell.delivery%%'
AND wh.type = 'stock'
%s
''' % extra
def group_sql(self, sql_type='out'):
return '''
GROUP BY goods_categ,goods_code,goods,attribute,warehouse,uos,uom,wml.cost_unit
'''
def order_sql(self, sql_type='out'):
return '''
ORDER BY goods_code,goods,attribute,warehouse
'''
def get_context(self, sql_type='out', context=None):
date_end = datetime.datetime.strptime(
context.get('date_end'), '%Y-%m-%d') + datetime.timedelta(days=1)
date_end = date_end.strftime('%Y-%m-%d')
return {
'date_start': context.get('date_start') or '',
'date_end': date_end,
'partner_id': context.get('partner_id') and context.get('partner_id')[0] or '',
'goods_id': context.get('goods_id') and context.get('goods_id')[0] or '',
'goods_categ_id': context.get('goods_categ_id') and context.get('goods_categ_id')[0] or '',
'warehouse_id': context.get('warehouse_id') and context.get('warehouse_id')[0] or '',
}
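    # Editor's note: the dict above fills the {date_start}/{date_end}/... slots
    # in where_sql(); e.g. for a January 2016 report (dates made up) it yields
    # {'date_start': '2016-01-01', 'date_end': '2016-02-01', ...}.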
def _compute_order(self, result, order):
order = order or 'goods_code ASC'
return super(SellSummaryGoods, self)._compute_order(result, order)
def collect_data_by_sql(self, sql_type='out'):
collection = self.execute_sql(sql_type='out')
return collection
@api.multi
def view_detail(self):
        '''View-detail button handler for the sales summary (by product) report'''
self.ensure_one()
line_ids = []
res = []
move_lines = []
result = self.get_data_from_cache()
for line in result:
if line.get('id') == self.id:
line_ids = line.get('id_lists')
move_lines = self.env['wh.move.line'].search(
[('id', 'in', line_ids)])
for move_line in move_lines:
details = self.env['sell.order.detail'].search(
[('order_name', '=', move_line.move_id.name),
('goods_id', '=', move_line.goods_id.id)])
for detail in details:
res.append(detail.id)
return {
            'name': u'Sales Details',
'view_mode': 'tree',
'view_id': False,
'res_model': 'sell.order.detail',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', res)],
}
| agpl-3.0 | 5,290,346,230,518,311,000 | 40.649682 | 110 | 0.544426 | false |
zerkrx/zerkbox | lib/youtube_dl/extractor/disney.py | 21 | 6696 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
compat_str,
determine_ext,
ExtractorError,
)
class DisneyIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?P<domain>(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr|channel\.de)|(?:starwars|marvelkids)\.com))/(?:(?:embed/|(?:[^/]+/)+[\w-]+-)(?P<id>[a-z0-9]{24})|(?:[^/]+/)?(?P<display_id>[^/?#]+))'''
_TESTS = [{
# Disney.EmbedVideo
'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977',
'info_dict': {
'id': '545ed1857afee5a0ec239977',
'ext': 'mp4',
'title': 'Moana - Trailer',
'description': 'A fun adventure for the entire Family! Bring home Moana on Digital HD Feb 21 & Blu-ray March 7',
'upload_date': '20170112',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# Grill.burger
'url': 'http://www.starwars.com/video/rogue-one-a-star-wars-story-intro-featurette',
'info_dict': {
'id': '5454e9f4e9804a552e3524c8',
'ext': 'mp4',
'title': '"Intro" Featurette: Rogue One: A Star Wars Story',
'upload_date': '20170104',
'description': 'Go behind-the-scenes of Rogue One: A Star Wars Story in this featurette with Director Gareth Edwards and the cast of the film.',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2',
'only_matching': True,
}, {
'url': 'http://video.en.disneyme.com/watch/future-worm/robo-carp-2001-544b66002aa7353cdd3f5114',
'only_matching': True,
}, {
'url': 'http://video.disneyturkiye.com.tr/izle/7c-7-cuceler/kimin-sesi-zaten-5456f3d015f6b36c8afdd0e2',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/embed/546a4798ddba3d1612e4005d',
'only_matching': True,
}, {
'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/embed/522900d2ced3c565e4cc0677',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/videos/contest-of-champions-part-four-clip-1',
'only_matching': True,
}, {
'url': 'http://disneyjunior.en.disneyme.com/dj/watch-my-friends-tigger-and-pooh-promo',
'only_matching': True,
}, {
'url': 'http://disneychannel.de/sehen/soy-luna-folge-118-5518518987ba27f3cc729268',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/galactech-the-galactech-grab-galactech-an-admiral-rescue',
'only_matching': True,
}]
def _real_extract(self, url):
domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
if not video_id:
webpage = self._download_webpage(url, display_id)
grill = re.sub(r'"\s*\+\s*"', '', self._search_regex(
r'Grill\.burger\s*=\s*({.+})\s*:',
webpage, 'grill data'))
page_data = next(s for s in self._parse_json(grill, display_id)['stack'] if s.get('type') == 'video')
video_data = page_data['data'][0]
else:
webpage = self._download_webpage(
'http://%s/embed/%s' % (domain, video_id), video_id)
page_data = self._parse_json(self._search_regex(
r'Disney\.EmbedVideo\s*=\s*({.+});',
webpage, 'embed data'), video_id)
video_data = page_data['video']
for external in video_data.get('externals', []):
if external.get('source') == 'vevo':
return self.url_result('vevo:' + external['data_id'], 'Vevo')
video_id = video_data['id']
title = video_data['title']
formats = []
for flavor in video_data.get('flavors', []):
flavor_format = flavor.get('format')
flavor_url = flavor.get('url')
if not flavor_url or not re.match(r'https?://', flavor_url) or flavor_format == 'mp4_access':
continue
tbr = int_or_none(flavor.get('bitrate'))
if tbr == 99999:
formats.extend(self._extract_m3u8_formats(
flavor_url, video_id, 'mp4',
m3u8_id=flavor_format, fatal=False))
continue
format_id = []
if flavor_format:
format_id.append(flavor_format)
if tbr:
format_id.append(compat_str(tbr))
ext = determine_ext(flavor_url)
if flavor_format == 'applehttp' or ext == 'm3u8':
ext = 'mp4'
width = int_or_none(flavor.get('width'))
height = int_or_none(flavor.get('height'))
formats.append({
'format_id': '-'.join(format_id),
'url': flavor_url,
'width': width,
'height': height,
'tbr': tbr,
'ext': ext,
'vcodec': 'none' if (width == 0 and height == 0) else None,
})
if not formats and video_data.get('expired'):
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, page_data['translations']['video_expired']),
expected=True)
self._sort_formats(formats)
subtitles = {}
for caption in video_data.get('captions', []):
caption_url = caption.get('url')
caption_format = caption.get('format')
if not caption_url or caption_format.startswith('unknown'):
continue
subtitles.setdefault(caption.get('language', 'en'), []).append({
'url': caption_url,
'ext': {
'webvtt': 'vtt',
}.get(caption_format, caption_format),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description') or video_data.get('short_desc'),
'thumbnail': video_data.get('thumb') or video_data.get('thumb_secure'),
'duration': int_or_none(video_data.get('duration_sec')),
'upload_date': unified_strdate(video_data.get('publish_date')),
'formats': formats,
'subtitles': subtitles,
}
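# Editor's note: within youtube-dl this extractor is selected automatically by
# matching _VALID_URL; an illustrative command-line invocation:
#   youtube-dl "http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977"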
| gpl-3.0 | -1,637,865,295,245,653,000 | 40.333333 | 257 | 0.528524 | false |
mastizada/kuma | kuma/wiki/migrations/0023_attachment_m2m.py | 5 | 17911 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DocumentAttachment'
db.create_table('wiki_documentattachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.Attachment'])),
('document', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Document'])),
('attached_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('name', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('attachments', ['DocumentAttachment'])
def backwards(self, orm):
# Deleting model 'DocumentAttachment'
db.delete_table('wiki_documentattachment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tidings.watch': {
'Meta': {'object_name': 'Watch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'attachments.attachment': {
'Meta': {'object_name': 'Attachment', 'db_table': "'wiki_attachment'"},
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_rev'", 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mindtouch_attachment_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'attachments.attachmentrevision': {
'Meta': {'object_name': 'AttachmentRevision', 'db_table': "'wiki_attachmentrevision'"},
'attachment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['attachments.Attachment']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_attachment_revisions'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'})
},
'wiki.document': {
'Meta': {'unique_together': "(('parent', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'},
'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}),
'defer_rendering': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'files': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['attachments.Attachment']", 'through': "orm['wiki.DocumentAttachment']", 'symmetrical': 'False'}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_rendered_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'locale': ('kuma.core.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
'mindtouch_page_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['wiki.Document']"}),
'parent_topic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.Document']"}),
'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['wiki.Document']", 'through': "orm['wiki.RelatedDocument']", 'symmetrical': 'False'}),
'render_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'render_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'rendered_errors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rendered_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'wiki.documentattachment': {
'Meta': {'object_name': 'DocumentAttachment'},
'attached_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.Attachment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'wiki.documenttag': {
'Meta': {'object_name': 'DocumentTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'wiki.editortoolbar': {
'Meta': {'object_name': 'EditorToolbar'},
'code': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_toolbars'", 'to': "orm['auth.User']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wiki.firefoxversion': {
'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'FirefoxVersion'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'firefox_version_set'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.IntegerField', [], {})
},
'wiki.helpfulvote': {
'Meta': {'object_name': 'HelpfulVote'},
'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['wiki.Document']"}),
'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'wiki.operatingsystem': {
'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'OperatingSystem'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'operating_system_set'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_id': ('django.db.models.fields.IntegerField', [], {})
},
'wiki.relateddocument': {
'Meta': {'ordering': "['-in_common']", 'object_name': 'RelatedDocument'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_from'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_common': ('django.db.models.fields.IntegerField', [], {}),
'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_to'", 'to': "orm['wiki.Document']"})
},
'wiki.reviewtag': {
'Meta': {'object_name': 'ReviewTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'wiki.reviewtaggedrevision': {
'Meta': {'object_name': 'ReviewTaggedRevision'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ReviewTag']"})
},
'wiki.revision': {
'Meta': {'object_name': 'Revision'},
'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': "orm['auth.User']"}),
'show_toc': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'significance': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'})
},
'wiki.taggeddocument': {
'Meta': {'object_name': 'TaggedDocument'},
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.DocumentTag']"})
}
}
complete_apps = ['wiki']
| mpl-2.0 | 7,916,257,407,508,146,000 | 81.16055 | 187 | 0.555245 | false |
hernandito/SickRage | lib/sqlalchemy/dialects/postgresql/zxjdbc.py | 79 | 1395 | # postgresql/zxjdbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: postgresql+zxjdbc://scott:tiger@localhost/db
:driverurl: http://jdbc.postgresql.org/
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import PGDialect, PGExecutionContext
class PGExecutionContext_zxjdbc(PGExecutionContext):
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
jdbc_db_name = 'postgresql'
jdbc_driver_name = 'org.postgresql.Driver'
execution_ctx_cls = PGExecutionContext_zxjdbc
supports_native_decimal = True
def __init__(self, *args, **kwargs):
super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
from com.ziclix.python.sql.handler import PostgresqlDataHandler
self.DataHandler = PostgresqlDataHandler
def _get_server_version_info(self, connection):
parts = connection.connection.dbversion.split('.')
return tuple(int(x) for x in parts)
dialect = PGDialect_zxjdbc
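# A minimal usage sketch (kept in comments; it runs only under Jython with the
# PostgreSQL JDBC driver on the classpath, and the DSN below is a placeholder):
#
#   from sqlalchemy import create_engine
#   engine = create_engine('postgresql+zxjdbc://scott:tiger@localhost/db')
#   with engine.connect() as conn:
#       print(conn.execute('SELECT 1').scalar())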
| gpl-3.0 | 6,737,825,263,692,701,000 | 30 | 84 | 0.717563 | false |
sserrot/champion_relationships | venv/Lib/site-packages/win32/lib/pywintypes.py | 6 | 7120 | # Magic utility that "redirects" to pywintypesxx.dll
import imp, sys, os
def __import_pywin32_system_module__(modname, globs):
# This has been through a number of iterations. The problem: how to
# locate pywintypesXX.dll when it may be in a number of places, and how
# to avoid ever loading it twice. This problem is compounded by the
# fact that the "right" way to do this requires win32api, but this
# itself requires pywintypesXX.
# And the killer problem is that someone may have done 'import win32api'
# before this code is called. In that case Windows will have already
# loaded pywintypesXX as part of loading win32api - but by the time
# we get here, we may locate a different one. This appears to work, but
# then starts raising bizarre TypeErrors complaining that something
# is not a pywintypes type when it clearly is!
# So in what we hope is the last major iteration of this, we now
# rely on a _win32sysloader module, implemented in C but not relying
# on pywintypesXX.dll. It then can check if the DLL we are looking for
# lib is already loaded.
if not sys.platform.startswith("win32"):
# These extensions can be built on Linux via the 'mainwin' toolkit.
# Look for a native 'lib{modname}.so'
# NOTE: The _win32sysloader module will probably build in this
# environment, so it may be better to use that here too.
for ext, mode, ext_type in imp.get_suffixes():
if ext_type==imp.C_EXTENSION:
for path in sys.path:
look = os.path.join(path, "lib" + modname + ext)
if os.path.isfile(look):
mod = imp.load_module(modname, None, look,
(ext, mode, ext_type))
# and fill our namespace with it.
# XXX - if this ever moves to py3k, this will probably
# need similar adjustments as below...
globs.update(mod.__dict__)
return
raise ImportError("No dynamic module " + modname)
# See if this is a debug build.
for suffix_item in imp.get_suffixes():
if suffix_item[0]=='_d.pyd':
suffix = '_d'
break
else:
suffix = ""
filename = "%s%d%d%s.dll" % \
(modname, sys.version_info[0], sys.version_info[1], suffix)
if hasattr(sys, "frozen"):
# If we are running from a frozen program (py2exe, McMillan, freeze)
# then we try and load the DLL from our sys.path
# XXX - This path may also benefit from _win32sysloader? However,
# MarkH has never seen the DLL load problem with py2exe programs...
for look in sys.path:
# If the sys.path entry is a (presumably) .zip file, use the
# directory
if os.path.isfile(look):
look = os.path.dirname(look)
found = os.path.join(look, filename)
if os.path.isfile(found):
break
else:
raise ImportError("Module '%s' isn't in frozen sys.path %s" % (modname, sys.path))
else:
# First see if it already in our process - if so, we must use that.
import _win32sysloader
found = _win32sysloader.GetModuleFilename(filename)
if found is None:
# We ask Windows to load it next. This is in an attempt to
# get the exact same module loaded should pywintypes be imported
# first (which is how we are here) or if, eg, win32api was imported
# first thereby implicitly loading the DLL.
# Sadly though, it doesn't quite work - if pywintypesxx.dll
# is in system32 *and* the executable's directory, on XP SP2, an
# import of win32api will cause Windows to load pywintypes
# from system32, where LoadLibrary for that name will
# load the one in the exe's dir.
# That shouldn't really matter though, so long as we only ever
# get one loaded.
found = _win32sysloader.LoadModule(filename)
if found is None:
# Windows can't find it - which although isn't relevent here,
# means that we *must* be the first win32 import, as an attempt
# to import win32api etc would fail when Windows attempts to
# locate the DLL.
# This is most likely to happen for "non-admin" installs, where
# we can't put the files anywhere else on the global path.
# If there is a version in our Python directory, use that
if os.path.isfile(os.path.join(sys.prefix, filename)):
found = os.path.join(sys.prefix, filename)
if found is None:
# Not in the Python directory? Maybe we were installed via
# easy_install...
if os.path.isfile(os.path.join(os.path.dirname(__file__), filename)):
found = os.path.join(os.path.dirname(__file__), filename)
if found is None:
# We might have been installed via PIP and without the post-install
# script having been run, so they might be in the
# lib/site-packages/pywin32_system32 directory.
# This isn't ideal as it means, say 'python -c "import win32api"'
# will not work but 'python -c "import pywintypes, win32api"' will,
# but it's better than nothing...
import distutils.sysconfig
maybe = os.path.join(distutils.sysconfig.get_python_lib(plat_specific=1),
"pywin32_system32", filename)
if os.path.isfile(maybe):
found = maybe
if found is None:
# give up in disgust.
raise ImportError("No system module '%s' (%s)" % (modname, filename))
# py2k and py3k differences:
# On py2k, after doing "imp.load_module('pywintypes')", sys.modules
# is unchanged - ie, sys.modules['pywintypes'] still refers to *this*
# .py module - but the module's __dict__ has *already* need updated
# with the new module's contents.
# However, on py3k, sys.modules *is* changed - sys.modules['pywintypes']
# will be changed to the new module object.
# SO: * on py2k don't need to update any globals.
# * on py3k we update our module dict with the new module's dict and
# copy its globals to ours.
old_mod = sys.modules[modname]
# Python can load the module
mod = imp.load_dynamic(modname, found)
# Check the sys.modules[] behaviour we describe above is true...
if sys.version_info < (3,0):
assert sys.modules[modname] is old_mod
assert mod is old_mod
else:
assert sys.modules[modname] is not old_mod
assert sys.modules[modname] is mod
# as above - re-reset to the *old* module object then update globs.
sys.modules[modname] = old_mod
globs.update(mod.__dict__)
__import_pywin32_system_module__("pywintypes", globals())
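# Summary of the lookup order implemented above (comments only; the module and
# function names match the code, the ordering is the point):
#   1. a pywintypesXX.dll already mapped into this process
#      (_win32sysloader.GetModuleFilename)
#   2. the regular Windows LoadLibrary search path (_win32sysloader.LoadModule)
#   3. fallbacks: sys.prefix, this package's own directory, then
#      site-packages/pywin32_system32 for pip installs without the post-install
#      script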
| mit | 8,582,109,002,368,490,000 | 51.352941 | 94 | 0.603511 | false |
yavalvas/yav_com | build/matplotlib/examples/user_interfaces/embedding_in_wx2.py | 9 | 2706 | #!/usr/bin/env python
"""
An example of how to use wx or wxagg in an application with the new
toolbar - comment out the add_toolbar line for no toolbar
"""
# Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
from numpy import arange, sin, pi
import matplotlib
# uncomment the following to use wx rather than wxagg
#matplotlib.use('WX')
#from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas
# comment out the following to use wx rather than wxagg
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,
'CanvasFrame',size=(550,350))
self.SetBackgroundColour(wx.NamedColour("WHITE"))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
self.axes.plot(t,s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
self.add_toolbar() # comment this out for no toolbar
def add_toolbar(self):
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
def OnPaint(self, event):
self.canvas.draw()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
| mit | -3,529,546,067,889,718,000 | 29.75 | 79 | 0.640798 | false |
cbschaff/NBP | src/models/beacon10.py | 1 | 1853 | """
Copyright (C) 2017 Charles Schaff, David Yunis, Ayan Chakrabarti,
Matthew R. Walter. See LICENSE.txt for details.
"""
# Beacon model 10: fixed beacons of 8 channels in alternating clusters (of different subsets of 16)
import tensorflow as tf
import numpy as np
# Use with 8 channels
wn=1
def beacon(self):
NCHAN=self.NCHAN
v = np.zeros((25,25,9),dtype=np.float32)
v[:,:,0] = 0.5
y = [[ 4, 3, 5, 1],
[ 7, 2, 6, 8,],
[ 3, 6, 5, 4,],
[ 8, 4, 1, 7,],
[ 6, 8, 5, 3,],
[ 2, 4, 3, 7,],
[ 1, 2, 5, 6,],
[ 3, 8, 2, 7,],
[ 8, 5, 6, 4,],
[ 7, 1, 2, 3,],
[ 8, 3, 6, 2,],
[ 5, 4, 1, 7,],
[ 3, 7, 8, 6,],
[ 2, 1, 3, 4,],
[ 8, 2, 7, 6,],
[ 5, 1, 3, 4,],
[ 8, 2, 7, 5,],
[ 3, 4, 6, 1,],
[ 1, 2, 6, 8,],
[ 5, 7, 3, 4,],
[ 7, 2, 8, 1,],
[ 4, 5, 6, 3,],
[ 8, 2, 3, 7,],
[ 5, 1, 6, 4,],
[ 2, 3, 8, 1,],
[ 4, 7, 6, 5,],
[ 8, 3, 1, 7,],
[ 2, 6, 7, 8,],
[ 1, 3, 5, 4,],
[ 2, 7, 8, 6,],
[ 8, 3, 2, 7,],
[ 4, 6, 5, 1,],
[ 8, 4, 3, 7,],
[ 6, 2, 5, 1,],
[ 4, 3, 7, 2,],
[ 8, 5, 6, 1,]]
x= np.array(y)
for i in xrange(6):
for j in xrange(6):
v[2+4*i,2+4*j,x[6*i+j,0]] = 1.0
v[3+4*i,2+4*j,x[6*i+j,1]] = 1.0
v[2+4*i,3+4*j,x[6*i+j,2]] = 1.0
v[3+4*i,3+4*j,x[6*i+j,3]] = 1.0
v = np.reshape(v,(9*625))
lgsen = tf.Variable(v,trainable=False)
self.weights['sensor'] = lgsen
self.entropy = tf.constant(0)
lgsen = tf.reshape(lgsen,[1,self.NTX,NCHAN+1,1])
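    # A max_pool over the channel axis followed by an equality test yields a
    # one-hot indicator of each transmitter's argmax channel; slicing off
    # channel 0 (interpreted here as the "off" channel) keeps the 8 usable
    # beacon channels.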
lgout = tf.to_float(tf.equal(lgsen,tf.nn.max_pool(lgsen,\
[1,1,self.NCHAN+1,1],[1,1,self.NCHAN+1,1],'VALID')))
lgout = tf.reshape(lgout,[1,1,self.NTX,NCHAN+1])
lgout = tf.slice(lgout,begin=[0,0,0,1],size=[-1,-1,-1,-1])
return lgout
| gpl-3.0 | 6,338,860,614,589,693,000 | 22.75641 | 99 | 0.426875 | false |
msreis/SigNetSim | signetsim/views/auth/ActivateAccountView.py | 2 | 2778 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" ActivateAccountView.py
This file ...
"""
from django.views.generic import TemplateView
from signetsim.views.HasUserLoggedIn import HasUserLoggedIn
from signetsim.models import User
from django.core.mail import send_mail
from django.conf import settings
class ActivateAccountView(TemplateView, HasUserLoggedIn):
template_name = 'accounts/activate_account.html'
def __init__(self, **kwargs):
TemplateView.__init__(self, **kwargs)
HasUserLoggedIn.__init__(self, **kwargs)
self.activated = False
def get_context_data(self, **kwargs):
kwargs['activated'] = self.activated
return kwargs
def get(self, request, *args, **kwargs):
if (request.user.is_staff is True
			and request.GET.get('username') is not None
and User.objects.filter(username=request.GET['username']).exists()):
t_user = User.objects.get(username=request.GET['username'])
t_user.is_active = True
t_user.save()
# For test runs
if 'HTTP_HOST' in request.META:
self.sendUserEmail(request, t_user.username, t_user.email)
self.activated = True
return TemplateView.get(self, request, *args, **kwargs)
def sendUserEmail(self, request, username, email):
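		# Rebuild the externally visible base URL. Behind a reverse proxy, the
		# X-Script-Name / X-Scheme headers (assumed to be set by the proxy)
		# take precedence over the request's own scheme and host.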
url = settings.BASE_URL
if "HTTP_X_SCRIPT_NAME" in request.META and request.META['HTTP_X_SCRIPT_NAME'] != "":
url = str(request.META['HTTP_X_SCRIPT_NAME']) + url
if "HTTP_X_SCHEME" in request.META and request.META['HTTP_X_SCHEME'] != "":
url = "%s://%s%s" % (str(request.META['HTTP_X_SCHEME']), request.META['HTTP_HOST'], url)
else:
url = "%s://%s%s" % (request.scheme, request.META['HTTP_HOST'], url)
login_url = "%saccounts/login/" % url
send_mail(
subject='SigNetSim user account activated',
message='',
html_message='Dear %s, <br/><br/>Your SigNetSim account has just been activated ! <br>You can start using it right now, by going to the page <br/>%s<br/>' % (
username, login_url),
from_email=settings.EMAIL_ADDRESS,
recipient_list=[email],
fail_silently=True,
) | agpl-3.0 | 8,917,291,774,644,286,000 | 30.579545 | 161 | 0.706263 | false |
Jin-W-FS/chinese-words-segmentation-test | WordDict.py | 1 | 1045 | #!/usr/bin/env python
# *- coding: utf-8 -*-
PUNCTUATIONS = (set(u'''`~!@#$%^&*()_+-={}[]|\:";'<>?,./ ''') |
set(u'''~`!@#¥%……&*()——+-=『』【】、‘’“”:;《》?,。/''')) - \
set(u'''_''') # not punctuation
class WordDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.total = sum(self[k][0] for k in self)
self.word_maxlen = max(len(k) for k in self)
self.punctuations = PUNCTUATIONS
    def add(self, word, freq = 1, attr = ''):
        if word not in self:
            self[word] = (0, set())
        if len(word) > self.word_maxlen:
            self.word_maxlen = len(word)
        f, a = self[word]
        self[word] = (f + freq, a | set(attr))
        self.total += freq
@staticmethod
def Load(file, encoding = 'utf-8'):
d = {}
for line in file:
s = line.decode(encoding).split()
d[s[0]] = (int(s[1]), set(s[2]))
return WordDict(d)
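# Minimal usage sketch (assumes a UTF-8 dictionary file whose lines look like
# "word 123 n"; the filename below is hypothetical):
#
#   with open('words.dict', 'rb') as f:
#       wd = WordDict.Load(f)
#   wd.add(u'word', freq=2, attr='n')
#   print wd.total, wd.word_maxlen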
| gpl-2.0 | -1,600,387,630,918,851,300 | 34.607143 | 68 | 0.446339 | false |
SU-ECE-17-7/ibeis | ibeis/web/routes.py | 1 | 58514 | # -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
from __future__ import absolute_import, division, print_function
import random
import math
from flask import request, current_app, url_for
from ibeis.control import controller_inject
from ibeis import constants as const
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY
from ibeis.web import appfuncs as appf
from ibeis.web import routes_ajax
import utool as ut
import vtool as vt
import numpy as np
register_route = controller_inject.get_ibeis_flask_route(__name__)
@register_route('/', methods=['GET'])
def root():
return appf.template(None)
@register_route('/view/', methods=['GET'])
def view():
def _date_list(gid_list):
unixtime_list = ibs.get_image_unixtime(gid_list)
datetime_list = [
ut.unixtime_to_datetimestr(unixtime)
if unixtime is not None else
'UNKNOWN'
for unixtime in unixtime_list
]
datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
return date_list
def filter_annots_imageset(aid_list):
try:
imgsetid = request.args.get('imgsetid', '')
imgsetid = int(imgsetid)
imgsetid_list = ibs.get_valid_imgsetids()
assert imgsetid in imgsetid_list
except:
print('ERROR PARSING IMAGESET ID FOR ANNOTATION FILTERING')
return aid_list
imgsetids_list = ibs.get_annot_imgsetids(aid_list)
aid_list = [
aid
for aid, imgsetid_list_ in zip(aid_list, imgsetids_list)
if imgsetid in imgsetid_list_
]
return aid_list
def filter_images_imageset(gid_list):
try:
imgsetid = request.args.get('imgsetid', '')
imgsetid = int(imgsetid)
imgsetid_list = ibs.get_valid_imgsetids()
assert imgsetid in imgsetid_list
except:
print('ERROR PARSING IMAGESET ID FOR IMAGE FILTERING')
return gid_list
imgsetids_list = ibs.get_image_imgsetids(gid_list)
gid_list = [
gid
for gid, imgsetid_list_ in zip(gid_list, imgsetids_list)
if imgsetid in imgsetid_list_
]
return gid_list
def filter_names_imageset(nid_list):
try:
imgsetid = request.args.get('imgsetid', '')
imgsetid = int(imgsetid)
imgsetid_list = ibs.get_valid_imgsetids()
assert imgsetid in imgsetid_list
except:
print('ERROR PARSING IMAGESET ID FOR ANNOTATION FILTERING')
return nid_list
aids_list = ibs.get_name_aids(nid_list)
imgsetids_list = [
set(ut.flatten(ibs.get_annot_imgsetids(aid_list)))
for aid_list in aids_list
]
nid_list = [
nid
for nid, imgsetid_list_ in zip(nid_list, imgsetids_list)
if imgsetid in imgsetid_list_
]
return nid_list
ibs = current_app.ibs
filter_kw = {
'multiple': None,
'minqual': 'good',
'is_known': True,
'min_pername': 1,
'view': ['right'],
}
aid_list = ibs.get_valid_aids()
aid_list = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
aid_list = filter_annots_imageset(aid_list)
gid_list = ibs.get_annot_gids(aid_list)
unixtime_list = ibs.get_image_unixtime(gid_list)
nid_list = ibs.get_annot_name_rowids(aid_list)
date_list = _date_list(gid_list)
flagged_date_list = ['2016/01/29', '2016/01/30', '2016/01/31', '2016/02/01']
gid_list_unique = list(set(gid_list))
date_list_unique = _date_list(gid_list_unique)
date_taken_dict = {}
for gid, date in zip(gid_list_unique, date_list_unique):
if date not in flagged_date_list:
continue
if date not in date_taken_dict:
date_taken_dict[date] = [0, 0]
date_taken_dict[date][1] += 1
gid_list_all = ibs.get_valid_gids()
gid_list_all = filter_images_imageset(gid_list_all)
date_list_all = _date_list(gid_list_all)
for gid, date in zip(gid_list_all, date_list_all):
if date not in flagged_date_list:
continue
if date in date_taken_dict:
date_taken_dict[date][0] += 1
value = 0
label_list = []
value_list = []
index_list = []
seen_set = set()
current_seen_set = set()
previous_seen_set = set()
last_date = None
date_seen_dict = {}
for index, (unixtime, aid, nid, date) in enumerate(sorted(zip(unixtime_list, aid_list, nid_list, date_list))):
if date not in flagged_date_list:
continue
index_list.append(index + 1)
# Add to counters
if date not in date_seen_dict:
date_seen_dict[date] = [0, 0, 0, 0]
date_seen_dict[date][0] += 1
if nid not in current_seen_set:
current_seen_set.add(nid)
date_seen_dict[date][1] += 1
if nid in previous_seen_set:
date_seen_dict[date][3] += 1
if nid not in seen_set:
seen_set.add(nid)
value += 1
date_seen_dict[date][2] += 1
# Add to register
value_list.append(value)
# Reset step (per day)
if date != last_date and date != 'UNKNOWN':
last_date = date
previous_seen_set = set(current_seen_set)
current_seen_set = set()
label_list.append(date)
else:
label_list.append('')
# def optimization1(x, a, b, c):
# return a * np.log(b * x) + c
# def optimization2(x, a, b, c):
# return a * np.sqrt(x) ** b + c
# def optimization3(x, a, b, c):
# return 1.0 / (a * np.exp(-b * x) + c)
# def process(func, opts, domain, zero_index, zero_value):
# values = func(domain, *opts)
# diff = values[zero_index] - zero_value
# values -= diff
# values[ values < 0.0 ] = 0.0
# values[:zero_index] = 0.0
# values = values.astype(int)
# return list(values)
# optimization_funcs = [
# optimization1,
# optimization2,
# optimization3,
# ]
# # Get data
# x = np.array(index_list)
# y = np.array(value_list)
# # Fit curves
# end = int(len(index_list) * 1.25)
# domain = np.array(range(1, end))
# zero_index = len(value_list) - 1
# zero_value = value_list[zero_index]
# regressed_opts = [ curve_fit(func, x, y)[0] for func in optimization_funcs ]
# prediction_list = [
# process(func, opts, domain, zero_index, zero_value)
# for func, opts in zip(optimization_funcs, regressed_opts)
# ]
# index_list = list(domain)
prediction_list = []
date_seen_dict.pop('UNKNOWN', None)
bar_label_list = sorted(date_seen_dict.keys())
bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]
# label_list += ['Models'] + [''] * (len(index_list) - len(label_list) - 1)
# value_list += [0] * (len(index_list) - len(value_list))
# Counts
imgsetid_list = ibs.get_valid_imgsetids()
gid_list = ibs.get_valid_gids()
gid_list = filter_images_imageset(gid_list)
aid_list = ibs.get_valid_aids()
aid_list = filter_annots_imageset(aid_list)
nid_list = ibs.get_valid_nids()
nid_list = filter_names_imageset(nid_list)
# contrib_list = ibs.get_valid_contrib_rowids()
note_list = ibs.get_image_notes(gid_list)
note_list = [
','.join(note.split(',')[:-1])
for note in note_list
]
contrib_list = set(note_list)
# nid_list = ibs.get_valid_nids()
aid_list_count = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
aid_list_count = filter_annots_imageset(aid_list_count)
gid_list_count = list(set(ibs.get_annot_gids(aid_list_count)))
nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
nid_list_count = list(set(nid_list_count_dup))
# Calculate the Petersen-Lincoln index form the last two days
from ibeis.other import dbinfo as dbinfo_
try:
try:
            # Intentionally bail out to the KeyError handler below and use the
            # manual two-day sight/resight computation instead.
            raise KeyError()
            # vals = dbinfo_.estimate_ggr_count(ibs)
            # nsight1, nsight2, resight, pl_index, pl_error = vals
            # pl_index = 'Undefined - Zero recaptured (k = 0)'
except KeyError:
index1 = bar_label_list.index('2016/01/30')
index2 = bar_label_list.index('2016/01/31')
c1 = bar_value_list4[index1]
c2 = bar_value_list4[index2]
c3 = bar_value_list6[index2]
pl_index, pl_error = dbinfo_.sight_resight_count(c1, c2, c3)
except (IndexError, ValueError):
pl_index = 0
pl_error = 0
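    # Sketch of the sight/resight math (an assumption about what
    # sight_resight_count computes): with c1 names seen on day 1, c2 on day 2,
    # and c3 resighted on both days, the Lincoln-Petersen estimate is
    # N ~= (c1 * c2) / c3, e.g. c1=100, c2=120, c3=40 gives N ~= 300.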
# Get the markers
gid_list_markers = ibs.get_annot_gids(aid_list_count)
gps_list_markers = map(list, ibs.get_image_gps(gid_list_markers))
gps_list_markers_all = map(list, ibs.get_image_gps(gid_list))
REMOVE_DUP_CODE = True
if not REMOVE_DUP_CODE:
# Get the tracks
nid_track_dict = ut.ddict(list)
for nid, gps in zip(nid_list_count_dup, gps_list_markers):
if gps[0] == -1.0 and gps[1] == -1.0:
continue
nid_track_dict[nid].append(gps)
gps_list_tracks = [ nid_track_dict[nid] for nid in sorted(nid_track_dict.keys()) ]
else:
__nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))
gps_list_markers = [ gps for gps in gps_list_markers if tuple(gps) != (-1, -1, ) ]
gps_list_markers_all = [ gps for gps in gps_list_markers_all if tuple(gps) != (-1, -1, ) ]
gps_list_tracks = [
[ gps for gps in gps_list_track if tuple(gps) != (-1, -1, ) ]
for gps_list_track in gps_list_tracks
]
valid_aids = ibs.get_valid_aids()
valid_aids = filter_annots_imageset(valid_aids)
used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
# used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))
note_list = ibs.get_image_notes(used_gids)
note_list = [
','.join(note.split(',')[:-1])
for note in note_list
]
used_contrib_tags = set(note_list)
# Get Age and sex (By Annot)
# annot_sex_list = ibs.get_annot_sex(valid_aids_)
# annot_age_months_est_min = ibs.get_annot_age_months_est_min(valid_aids_)
# annot_age_months_est_max = ibs.get_annot_age_months_est_max(valid_aids_)
# age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
# for sex, min_age, max_age in zip(annot_sex_list, annot_age_months_est_min, annot_age_months_est_max):
# if sex not in [0, 1]:
# sex = 2
# # continue
# if (min_age is None or min_age < 12) and max_age < 12:
# age_list[sex][0] += 1
# elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
# age_list[sex][1] += 1
# elif 36 <= min_age and (36 <= max_age or max_age is None):
# age_list[sex][2] += 1
# Get Age and sex (By Name)
name_sex_list = ibs.get_name_sex(nid_list_count)
name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
age_list = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
age_unreviewed = 0
age_ambiguous = 0
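    # Bucket each name by age into <12, 12-23, 24-35, and 36+ months
    # (columns 0-3), with one row per annot sex code (2 doubling as unknown);
    # unreviewed and ambiguous ages are tallied separately.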
for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
# print('[web] Invalid name %r: Cannot have more than one age' % (nid, ))
age_ambiguous += 1
continue
min_age = None
max_age = None
if len(min_ages) > 0:
min_age = min_ages[0]
if len(max_ages) > 0:
max_age = max_ages[0]
# Histogram
        if (min_age is None and max_age is None) or (min_age == -1 and max_age == -1):
# print('[web] Unreviewded name %r: Specify the age for the name' % (nid, ))
age_unreviewed += 1
continue
if sex not in [0, 1]:
sex = 2
# continue
if (min_age is None or min_age < 12) and max_age < 12:
age_list[sex][0] += 1
elif 12 <= min_age and min_age < 24 and 12 <= max_age and max_age < 24:
age_list[sex][1] += 1
elif 24 <= min_age and min_age < 36 and 24 <= max_age and max_age < 36:
age_list[sex][2] += 1
elif 36 <= min_age and (36 <= max_age or max_age is None):
age_list[sex][3] += 1
age_total = sum(map(sum, age_list)) + age_unreviewed + age_ambiguous
age_total = np.nan if age_total == 0 else age_total
age_fmt_str = (lambda x: '% 4d (% 2.02f%%)' % (x, 100 * x / age_total, ))
age_str_list = [
[
age_fmt_str(age)
for age in age_list_
]
for age_list_ in age_list
]
age_str_list.append(age_fmt_str(age_unreviewed))
age_str_list.append(age_fmt_str(age_ambiguous))
# dbinfo_str = dbinfo()
dbinfo_str = 'SKIPPED DBINFO'
path_dict = ibs.compute_ggr_path_dict()
if 'North' in path_dict:
path_dict.pop('North')
if 'Core' in path_dict:
path_dict.pop('Core')
return appf.template('view',
line_index_list=index_list,
line_label_list=label_list,
line_value_list=value_list,
prediction_list=prediction_list,
pl_index=pl_index,
pl_error=pl_error,
gps_list_markers=gps_list_markers,
gps_list_markers_all=gps_list_markers_all,
gps_list_tracks=gps_list_tracks,
path_dict=path_dict,
bar_label_list=bar_label_list,
bar_value_list1=bar_value_list1,
bar_value_list2=bar_value_list2,
bar_value_list3=bar_value_list3,
bar_value_list4=bar_value_list4,
bar_value_list5=bar_value_list5,
bar_value_list6=bar_value_list6,
age_list=age_list,
age_str_list=age_str_list,
age_ambiguous=age_ambiguous,
age_unreviewed=age_unreviewed,
age_total=age_total,
dbinfo_str=dbinfo_str,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
contrib_list=contrib_list,
contrib_list_str=','.join(map(str, contrib_list)),
num_contribs=len(contrib_list),
gid_list_count=gid_list_count,
gid_list_count_str=','.join(map(str, gid_list_count)),
num_gids_count=len(gid_list_count),
aid_list=aid_list,
aid_list_str=','.join(map(str, aid_list)),
num_aids=len(aid_list),
aid_list_count=aid_list_count,
aid_list_count_str=','.join(map(str, aid_list_count)),
num_aids_count=len(aid_list_count),
nid_list=nid_list,
nid_list_str=','.join(map(str, nid_list)),
num_nids=len(nid_list),
nid_list_count=nid_list_count,
nid_list_count_str=','.join(map(str, nid_list_count)),
num_nids_count=len(nid_list_count),
used_gids=used_gids,
num_used_gids=len(used_gids),
used_contribs=used_contrib_tags,
num_used_contribs=len(used_contrib_tags),
__wrapper_header__=False)
@register_route('/view/imagesets/', methods=['GET'])
def view_imagesets():
ibs = current_app.ibs
filtered = True
imgsetid = request.args.get('imgsetid', '')
if len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
else:
imgsetid_list = ibs.get_valid_imgsetids()
filtered = False
start_time_posix_list = ibs.get_imageset_start_time_posix(imgsetid_list)
datetime_list = [
ut.unixtime_to_datetimestr(start_time_posix)
if start_time_posix is not None else
'Unknown'
for start_time_posix in start_time_posix_list
]
gids_list = [ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ]
aids_list = [ ut.flatten(ibs.get_image_aids(gid_list)) for gid_list in gids_list ]
images_reviewed_list = [ appf.imageset_image_processed(ibs, gid_list) for gid_list in gids_list ]
annots_reviewed_viewpoint_list = [ appf.imageset_annot_viewpoint_processed(ibs, aid_list) for aid_list in aids_list ]
annots_reviewed_quality_list = [ appf.imageset_annot_quality_processed(ibs, aid_list) for aid_list in aids_list ]
image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    reviewed_list = [
        all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality)
        for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in
        zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list)
    ]
imageset_list = zip(
imgsetid_list,
ibs.get_imageset_text(imgsetid_list),
ibs.get_imageset_num_gids(imgsetid_list),
image_processed_list,
ibs.get_imageset_num_aids(imgsetid_list),
annot_processed_viewpoint_list,
annot_processed_quality_list,
start_time_posix_list,
datetime_list,
reviewed_list,
)
imageset_list.sort(key=lambda t: t[7])
return appf.template('view', 'imagesets',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
imageset_list=imageset_list,
num_imagesets=len(imageset_list))
@register_route('/view/image/<gid>/', methods=['GET'])
def image_view_api(gid=None, thumbnail=False, fresh=False, **kwargs):
r"""
Returns the base64 encoded image of image <gid>
RESTful:
Method: GET
        URL: /view/image/<gid>/
"""
encoded = routes_ajax.image_src(gid, thumbnail=thumbnail, fresh=fresh, **kwargs)
return appf.template(None, 'single', encoded=encoded)
@register_route('/view/images/', methods=['GET'])
def view_images():
ibs = current_app.ibs
filtered = True
imgsetid_list = []
gid = request.args.get('gid', '')
imgsetid = request.args.get('imgsetid', '')
page = max(0, int(request.args.get('page', 1)))
if len(gid) > 0:
gid_list = gid.strip().split(',')
gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
elif len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid) for imgsetid_ in imgsetid_list ])
else:
gid_list = ibs.get_valid_gids()
filtered = False
# Page
page_start = min(len(gid_list), (page - 1) * appf.PAGE_SIZE)
page_end = min(len(gid_list), page * appf.PAGE_SIZE)
page_total = int(math.ceil(len(gid_list) / appf.PAGE_SIZE))
page_previous = None if page_start == 0 else page - 1
page_next = None if page_end == len(gid_list) else page + 1
gid_list = gid_list[page_start:page_end]
print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
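    # Pagination sketch: with appf.PAGE_SIZE == 100 (an illustrative value),
    # 250 matching gids and page == 2 yield page_start == 100, page_end == 200,
    # page_total == 3, page_previous == 1, and page_next == 3.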
image_unixtime_list = ibs.get_image_unixtime(gid_list)
datetime_list = [
ut.unixtime_to_datetimestr(image_unixtime)
if image_unixtime is not None
else
'Unknown'
for image_unixtime in image_unixtime_list
]
image_list = zip(
gid_list,
[ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_image_imgsetids(gid_list) ],
ibs.get_image_gnames(gid_list),
image_unixtime_list,
datetime_list,
ibs.get_image_gps(gid_list),
ibs.get_image_party_tag(gid_list),
ibs.get_image_contributor_tag(gid_list),
ibs.get_image_notes(gid_list),
appf.imageset_image_processed(ibs, gid_list),
)
image_list.sort(key=lambda t: t[3])
return appf.template('view', 'images',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
image_list=image_list,
num_images=len(image_list),
page=page,
page_start=page_start,
page_end=page_end,
page_total=page_total,
page_previous=page_previous,
page_next=page_next)
@register_route('/view/annotations/', methods=['GET'])
def view_annotations():
ibs = current_app.ibs
filtered = True
imgsetid_list = []
gid_list = []
aid = request.args.get('aid', '')
gid = request.args.get('gid', '')
imgsetid = request.args.get('imgsetid', '')
page = max(0, int(request.args.get('page', 1)))
if len(aid) > 0:
aid_list = aid.strip().split(',')
aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
elif len(gid) > 0:
gid_list = gid.strip().split(',')
gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
elif len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
else:
aid_list = ibs.get_valid_aids()
filtered = False
# Page
page_start = min(len(aid_list), (page - 1) * appf.PAGE_SIZE)
page_end = min(len(aid_list), page * appf.PAGE_SIZE)
page_total = int(math.ceil(len(aid_list) / appf.PAGE_SIZE))
page_previous = None if page_start == 0 else page - 1
page_next = None if page_end == len(aid_list) else page + 1
aid_list = aid_list[page_start:page_end]
print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(aid_list), page_previous, page_next, ))
annotation_list = zip(
aid_list,
ibs.get_annot_gids(aid_list),
[ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list) ],
ibs.get_annot_image_names(aid_list),
ibs.get_annot_names(aid_list),
ibs.get_annot_exemplar_flags(aid_list),
ibs.get_annot_species_texts(aid_list),
ibs.get_annot_yaw_texts(aid_list),
ibs.get_annot_quality_texts(aid_list),
ibs.get_annot_sex_texts(aid_list),
ibs.get_annot_age_months_est(aid_list),
ibs.get_annot_reviewed(aid_list),
# [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(appf.imageset_annot_viewpoint_processed(ibs, aid_list), appf.imageset_annot_quality_processed(ibs, aid_list)) ],
)
annotation_list.sort(key=lambda t: t[0])
return appf.template('view', 'annotations',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
aid_list=aid_list,
aid_list_str=','.join(map(str, aid_list)),
num_aids=len(aid_list),
annotation_list=annotation_list,
num_annotations=len(annotation_list),
page=page,
page_start=page_start,
page_end=page_end,
page_total=page_total,
page_previous=page_previous,
page_next=page_next)
@register_route('/view/names/', methods=['GET'])
def view_names():
ibs = current_app.ibs
filtered = True
aid_list = []
imgsetid_list = []
gid_list = []
nid = request.args.get('nid', '')
aid = request.args.get('aid', '')
gid = request.args.get('gid', '')
imgsetid = request.args.get('imgsetid', '')
page = max(0, int(request.args.get('page', 1)))
if len(nid) > 0:
nid_list = nid.strip().split(',')
nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
if len(aid) > 0:
aid_list = aid.strip().split(',')
aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
nid_list = ibs.get_annot_name_rowids(aid_list)
elif len(gid) > 0:
gid_list = gid.strip().split(',')
gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
nid_list = ibs.get_annot_name_rowids(aid_list)
elif len(imgsetid) > 0:
imgsetid_list = imgsetid.strip().split(',')
imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
nid_list = ibs.get_annot_name_rowids(aid_list)
else:
nid_list = ibs.get_valid_nids()
filtered = False
# Page
appf.PAGE_SIZE_ = int(appf.PAGE_SIZE / 5)
page_start = min(len(nid_list), (page - 1) * appf.PAGE_SIZE_)
page_end = min(len(nid_list), page * appf.PAGE_SIZE_)
page_total = int(math.ceil(len(nid_list) / appf.PAGE_SIZE_))
page_previous = None if page_start == 0 else page - 1
page_next = None if page_end == len(nid_list) else page + 1
nid_list = nid_list[page_start:page_end]
print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
aids_list = ibs.get_name_aids(nid_list)
annotations_list = [ zip(
aid_list_,
ibs.get_annot_gids(aid_list_),
[ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list_) ],
ibs.get_annot_image_names(aid_list_),
ibs.get_annot_names(aid_list_),
ibs.get_annot_exemplar_flags(aid_list_),
ibs.get_annot_species_texts(aid_list_),
ibs.get_annot_yaw_texts(aid_list_),
ibs.get_annot_quality_texts(aid_list_),
ibs.get_annot_sex_texts(aid_list_),
ibs.get_annot_age_months_est(aid_list_),
[ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(appf.imageset_annot_viewpoint_processed(ibs, aid_list_), appf.imageset_annot_quality_processed(ibs, aid_list_)) ],
) for aid_list_ in aids_list ]
name_list = zip(
nid_list,
annotations_list
)
name_list.sort(key=lambda t: t[0])
return appf.template('view', 'names',
filtered=filtered,
imgsetid_list=imgsetid_list,
imgsetid_list_str=','.join(map(str, imgsetid_list)),
num_imgsetids=len(imgsetid_list),
gid_list=gid_list,
gid_list_str=','.join(map(str, gid_list)),
num_gids=len(gid_list),
aid_list=aid_list,
aid_list_str=','.join(map(str, aid_list)),
num_aids=len(aid_list),
nid_list=nid_list,
nid_list_str=','.join(map(str, nid_list)),
num_nids=len(nid_list),
name_list=name_list,
num_names=len(name_list),
page=page,
page_start=page_start,
page_end=page_end,
page_total=page_total,
page_previous=page_previous,
page_next=page_next)
@register_route('/turk/', methods=['GET'])
def turk():
imgsetid = request.args.get('imgsetid', '')
imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
return appf.template('turk', None, imgsetid=imgsetid)
def _make_review_image_info(ibs, gid):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.web.apis_detect import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> gid = ibs.get_valid_gids()[0]
"""
# Shows how to use new object-like interface to populate data
import numpy as np
image = ibs.images([gid])[0]
annots = image.annots
width, height = image.sizes
bbox_denom = np.array([width, height, width, height])
annotation_list = []
for aid in annots.aids:
annot_ = ibs.annots(aid)[0]
bbox = np.array(annot_.bboxes)
bbox_percent = bbox / bbox_denom * 100
temp = {
'left' : bbox_percent[0],
'top' : bbox_percent[1],
'width' : bbox_percent[2],
'height' : bbox_percent[3],
'label' : annot_.species,
'id' : annot_.aids,
'theta' : annot_.thetas,
'tags' : annot_.case_tags,
}
        annotation_list.append(temp)
    return annotation_list
@register_route('/turk/detection/', methods=['GET'])
def turk_detection():
ibs = current_app.ibs
refer_aid = request.args.get('refer_aid', None)
imgsetid = request.args.get('imgsetid', '')
imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
reviewed_list = appf.imageset_image_processed(ibs, gid_list)
progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
gid = request.args.get('gid', '')
if len(gid) > 0:
gid = int(gid)
else:
gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
if len(gid_list_) == 0:
gid = None
else:
# gid = gid_list_[0]
gid = random.choice(gid_list_)
previous = request.args.get('previous', None)
finished = gid is None
review = 'review' in request.args.keys()
display_instructions = request.cookies.get('ia-detection_instructions_seen', 1) == 0
display_species_examples = False # request.cookies.get('ia-detection_example_species_seen', 0) == 0
if not finished:
gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
imgdata = ibs.get_image_imgdata(gid)
image_src = appf.embed_image_html(imgdata)
# Get annotations
width, height = ibs.get_image_sizes(gid)
aid_list = ibs.get_image_aids(gid)
annot_bbox_list = ibs.get_annot_bboxes(aid_list)
annot_thetas_list = ibs.get_annot_thetas(aid_list)
species_list = ibs.get_annot_species_texts(aid_list)
# Get annotation bounding boxes
annotation_list = []
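        # Convert each pixel-space bbox to percentages of the image dimensions
        # so the front-end can overlay the boxes on a scaled image.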
for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
temp = {}
temp['left'] = 100.0 * (annot_bbox[0] / width)
temp['top'] = 100.0 * (annot_bbox[1] / height)
temp['width'] = 100.0 * (annot_bbox[2] / width)
temp['height'] = 100.0 * (annot_bbox[3] / height)
temp['label'] = species
temp['id'] = aid
temp['theta'] = float(annot_theta)
annotation_list.append(temp)
if len(species_list) > 0:
species = max(set(species_list), key=species_list.count) # Get most common species
elif appf.default_species(ibs) is not None:
species = appf.default_species(ibs)
else:
species = KEY_DEFAULTS[SPECIES_KEY]
else:
gpath = None
species = None
image_src = None
annotation_list = []
callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), imgsetid, )
return appf.template('turk', 'detection',
imgsetid=imgsetid,
gid=gid,
refer_aid=refer_aid,
species=species,
image_path=gpath,
image_src=image_src,
previous=previous,
imagesettext=imagesettext,
progress=progress,
finished=finished,
annotation_list=annotation_list,
display_instructions=display_instructions,
display_species_examples=display_species_examples,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
review=review)
@register_route('/turk/detection/dynamic/', methods=['GET'])
def turk_detection_dynamic():
ibs = current_app.ibs
gid = request.args.get('gid', None)
gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
image = ibs.get_image_imgdata(gid)
image_src = appf.embed_image_html(image)
# Get annotations
width, height = ibs.get_image_sizes(gid)
aid_list = ibs.get_image_aids(gid)
annot_bbox_list = ibs.get_annot_bboxes(aid_list)
annot_thetas_list = ibs.get_annot_thetas(aid_list)
species_list = ibs.get_annot_species_texts(aid_list)
# Get annotation bounding boxes
annotation_list = []
for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
temp = {}
temp['left'] = 100.0 * (annot_bbox[0] / width)
temp['top'] = 100.0 * (annot_bbox[1] / height)
temp['width'] = 100.0 * (annot_bbox[2] / width)
temp['height'] = 100.0 * (annot_bbox[3] / height)
temp['label'] = species
temp['id'] = aid
temp['theta'] = float(annot_theta)
annotation_list.append(temp)
if len(species_list) > 0:
species = max(set(species_list), key=species_list.count) # Get most common species
elif appf.default_species(ibs) is not None:
species = appf.default_species(ibs)
else:
species = KEY_DEFAULTS[SPECIES_KEY]
    # The non-dynamic route passes imgsetid here; this dynamic route only has a
    # gid, so pass it under the matching query-string key.
    callback_url = '%s?gid=%s' % (url_for('submit_detection'), gid, )
return appf.template('turk', 'detection_dynamic',
gid=gid,
refer_aid=None,
species=species,
image_path=gpath,
image_src=image_src,
annotation_list=annotation_list,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
__wrapper__=False)
@register_route('/turk/annotation/', methods=['GET'])
def turk_annotation():
"""
CommandLine:
        python -m ibeis.web.app --exec-turk_annotation --db PZ_Master1
Example:
>>> # SCRIPT
>>> from ibeis.other.ibsfuncs import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
>>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
>>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
>>> ibs.start_web_annot_groupreview(aid_list)
"""
ibs = current_app.ibs
tup = appf.get_turk_annot_args(appf.imageset_annot_processed)
(aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-annotation_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
# image_src = routes_ajax.annotation_src(aid)
species = ibs.get_annot_species_texts(aid)
viewpoint_value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
quality_value = ibs.get_annot_qualities(aid)
if quality_value in [-1, None]:
quality_value = None
elif quality_value > 2:
quality_value = 2
elif quality_value <= 2:
quality_value = 1
multiple_value = ibs.get_annot_multiple(aid) == 1
else:
gid = None
gpath = None
image_src = None
species = None
viewpoint_value = None
quality_value = None
multiple_value = None
imagesettext = ibs.get_imageset_text(imgsetid)
species_rowids = ibs._get_all_species_rowids()
species_nice_list = ibs.get_species_nice(species_rowids)
combined_list = sorted(zip(species_nice_list, species_rowids))
species_nice_list = [ combined[0] for combined in combined_list ]
species_rowids = [ combined[1] for combined in combined_list ]
species_text_list = ibs.get_species_texts(species_rowids)
species_selected_list = [ species == species_ for species_ in species_text_list ]
species_list = zip(species_nice_list, species_text_list, species_selected_list)
species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
callback_url = url_for('submit_annotation')
return appf.template('turk', 'annotation',
imgsetid=imgsetid,
src_ag=src_ag,
dst_ag=dst_ag,
gid=gid,
aid=aid,
viewpoint_value=viewpoint_value,
quality_value=quality_value,
multiple_value=multiple_value,
image_path=gpath,
image_src=image_src,
previous=previous,
species_list=species_list,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
review=review)
@register_route('/turk/annotation/dynamic/', methods=['GET'])
def turk_annotation_dynamic():
ibs = current_app.ibs
aid = request.args.get('aid', None)
imgsetid = request.args.get('imgsetid', None)
review = 'review' in request.args.keys()
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
species = ibs.get_annot_species_texts(aid)
viewpoint_value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
quality_value = ibs.get_annot_qualities(aid)
if quality_value == -1:
quality_value = None
if quality_value == 0:
quality_value = 1
species_rowids = ibs._get_all_species_rowids()
species_nice_list = ibs.get_species_nice(species_rowids)
combined_list = sorted(zip(species_nice_list, species_rowids))
species_nice_list = [ combined[0] for combined in combined_list ]
species_rowids = [ combined[1] for combined in combined_list ]
species_text_list = ibs.get_species_texts(species_rowids)
species_selected_list = [ species == species_ for species_ in species_text_list ]
species_list = zip(species_nice_list, species_text_list, species_selected_list)
species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
callback_url = url_for('submit_annotation')
return appf.template('turk', 'annotation_dynamic',
imgsetid=imgsetid,
gid=gid,
aid=aid,
viewpoint_value=viewpoint_value,
quality_value=quality_value,
image_path=gpath,
image_src=image_src,
species_list=species_list,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
review=review,
__wrapper__=False)
@register_route('/turk/viewpoint/', methods=['GET'])
def turk_viewpoint():
"""
CommandLine:
python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1
Example:
>>> # SCRIPT
>>> from ibeis.other.ibsfuncs import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
>>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
>>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
>>> ibs.start_web_annot_groupreview(aid_list)
"""
ibs = current_app.ibs
tup = appf.get_turk_annot_args(appf.imageset_annot_viewpoint_processed)
(aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-viewpoint_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
species = ibs.get_annot_species_texts(aid)
else:
gid = None
gpath = None
image_src = None
species = None
imagesettext = ibs.get_imageset_text(imgsetid)
species_rowids = ibs._get_all_species_rowids()
species_nice_list = ibs.get_species_nice(species_rowids)
combined_list = sorted(zip(species_nice_list, species_rowids))
species_nice_list = [ combined[0] for combined in combined_list ]
species_rowids = [ combined[1] for combined in combined_list ]
species_text_list = ibs.get_species_texts(species_rowids)
species_selected_list = [ species == species_ for species_ in species_text_list ]
species_list = zip(species_nice_list, species_text_list, species_selected_list)
species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
return appf.template('turk', 'viewpoint',
imgsetid=imgsetid,
src_ag=src_ag,
dst_ag=dst_ag,
gid=gid,
aid=aid,
value=value,
image_path=gpath,
image_src=image_src,
previous=previous,
species_list=species_list,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
review=review)
@register_route('/turk/quality/', methods=['GET'])
def turk_quality():
"""
PZ Needs Tags:
17242
14468
14427
15946
14771
14084
4102
6074
3409
GZ Needs Tags;
1302
CommandLine:
python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1
Example:
>>> # SCRIPT
>>> from ibeis.other.ibsfuncs import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
>>> valid_views = ['primary', 'primary1', 'primary-1']
>>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
>>> ibs.start_web_annot_groupreview(aid_list)
"""
ibs = current_app.ibs
tup = appf.get_turk_annot_args(appf.imageset_annot_quality_processed)
(aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
value = ibs.get_annot_qualities(aid)
if value == -1:
value = None
if value == 0:
value = 1
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-quality_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
else:
gid = None
gpath = None
image_src = None
imagesettext = ibs.get_imageset_text(imgsetid)
return appf.template('turk', 'quality',
imgsetid=imgsetid,
src_ag=src_ag,
dst_ag=dst_ag,
gid=gid,
aid=aid,
value=value,
image_path=gpath,
image_src=image_src,
previous=previous,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
review=review)
@register_route('/turk/additional/', methods=['GET'])
def turk_additional():
ibs = current_app.ibs
imgsetid = request.args.get('imgsetid', '')
imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
nid_list = ibs.get_annot_nids(aid_list)
reviewed_list = appf.imageset_annot_additional_processed(ibs, aid_list, nid_list)
try:
progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
except ZeroDivisionError:
progress = '0.00'
imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
aid = request.args.get('aid', '')
if len(aid) > 0:
aid = int(aid)
else:
aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
if len(aid_list_) == 0:
aid = None
else:
# aid = aid_list_[0]
aid = random.choice(aid_list_)
previous = request.args.get('previous', None)
value_sex = ibs.get_annot_sex([aid])[0]
if value_sex >= 0:
value_sex += 2
else:
value_sex = None
value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    # Map the estimated (min, max) age-in-months interval onto the form's
    # numeric codes 1-7. Compare with == rather than `is`: identity checks on
    # integers only work by accident of CPython's small-int interning.
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    elif value_age_min == 36 and (value_age_max > 36 or value_age_max is None):
        value_age = 7
review = 'review' in request.args.keys()
finished = aid is None
display_instructions = request.cookies.get('ia-additional_instructions_seen', 1) == 0
if not finished:
gid = ibs.get_annot_gids(aid)
gpath = ibs.get_annot_chip_fpath(aid)
image = vt.imread(gpath)
image_src = appf.embed_image_html(image)
else:
gid = None
gpath = None
image_src = None
name_aid_list = None
nid = ibs.get_annot_name_rowids(aid)
if nid is not None:
name_aid_list = ibs.get_name_aids(nid)
quality_list = ibs.get_annot_qualities(name_aid_list)
quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
name_aid_combined_list = list(zip(
name_aid_list,
quality_list,
quality_text_list,
yaw_text_list,
))
name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
else:
name_aid_combined_list = []
region_str = 'UNKNOWN'
if aid is not None and gid is not None:
imgsetid_list = ibs.get_image_imgsetids(gid)
imgset_text_list = ibs.get_imageset_text(imgsetid_list)
imgset_text_list = [
imgset_text
for imgset_text in imgset_text_list
if 'GGR Special Zone' in imgset_text
]
assert len(imgset_text_list) < 2
if len(imgset_text_list) == 1:
region_str = imgset_text_list[0]
return appf.template('turk', 'additional',
imgsetid=imgsetid,
gid=gid,
aid=aid,
region_str=region_str,
value_sex=value_sex,
value_age=value_age,
image_path=gpath,
name_aid_combined_list=name_aid_combined_list,
image_src=image_src,
previous=previous,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
review=review)
@register_route('/group_review/', methods=['GET'])
def group_review():
prefill = request.args.get('prefill', '')
if len(prefill) > 0:
ibs = current_app.ibs
aid_list = ibs.get_valid_aids()
bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)
GROUP_BY_PREDICTION = True
if GROUP_BY_PREDICTION:
grouped_dict = ut.group_items(bad_viewpoint_list, ut.get_list_column(bad_viewpoint_list, 3))
grouped_list = grouped_dict.values()
regrouped_items = ut.flatten(ut.sortedby(grouped_list, map(len, grouped_list)))
candidate_aid_list = ut.get_list_column(regrouped_items, 0)
else:
candidate_aid_list = [ bad_viewpoint[0] for bad_viewpoint in bad_viewpoint_list]
elif request.args.get('aid_list', None) is not None:
aid_list = request.args.get('aid_list', '')
if len(aid_list) > 0:
aid_list = aid_list.replace('[', '')
aid_list = aid_list.replace(']', '')
aid_list = aid_list.strip().split(',')
candidate_aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
else:
candidate_aid_list = ''
else:
candidate_aid_list = ''
return appf.template(None, 'group_review', candidate_aid_list=candidate_aid_list, mode_list=appf.VALID_TURK_MODES)
@register_route('/sightings/', methods=['GET'])
def sightings(html_encode=True):
ibs = current_app.ibs
complete = request.args.get('complete', None) is not None
sightings = ibs.report_sightings_str(complete=complete, include_images=True)
if html_encode:
sightings = sightings.replace('\n', '<br/>')
return sightings
@register_route('/api/', methods=['GET'], __api_prefix_check__=False)
def api_root():
rules = current_app.url_map.iter_rules()
rule_dict = {}
for rule in rules:
methods = rule.methods
url = str(rule)
if '/api/' in url:
methods -= set(['HEAD', 'OPTIONS'])
if len(methods) == 0:
continue
if len(methods) > 1:
print('methods = %r' % (methods,))
method = list(methods)[0]
if method not in rule_dict.keys():
rule_dict[method] = []
rule_dict[method].append((method, url, ))
for method in rule_dict.keys():
rule_dict[method].sort()
url = '%s/api/core/dbname/' % (current_app.server_url, )
app_auth = controller_inject.get_url_authorization(url)
return appf.template(None, 'api',
app_url=url,
app_name=controller_inject.GLOBAL_APP_NAME,
app_secret=controller_inject.GLOBAL_APP_SECRET,
app_auth=app_auth,
rule_list=rule_dict)
@register_route('/upload/', methods=['GET'])
def upload():
return appf.template(None, 'upload')
@register_route('/dbinfo/', methods=['GET'])
def dbinfo():
try:
ibs = current_app.ibs
dbinfo_str = ibs.get_dbinfo_str()
    except Exception:
dbinfo_str = ''
dbinfo_str_formatted = '<pre>%s</pre>' % (dbinfo_str, )
return dbinfo_str_formatted
@register_route('/counts/', methods=['GET'])
def wb_counts():
fmt_str = '''<p># Annotations: <b>%d</b></p>
<p># MediaAssets (images): <b>%d</b></p>
<p># MarkedIndividuals: <b>%d</b></p>
<p># Encounters: <b>%d</b></p>
<p># Occurrences: <b>%d</b></p>'''
try:
ibs = current_app.ibs
aid_list = ibs.get_valid_aids()
nid_list = ibs.get_annot_nids(aid_list)
nid_list = [ nid for nid in nid_list if nid > 0 ]
gid_list = ibs.get_annot_gids(aid_list)
imgset_id_list = ibs.get_valid_imgsetids()
aids_list = ibs.get_imageset_aids(imgset_id_list)
imgset_id_list = [
imgset_id
for imgset_id, aid_list_ in zip(imgset_id_list, aids_list)
if len(aid_list_) > 0
]
valid_nid_list = list(set(nid_list))
valid_aid_list = list(set(aid_list))
valid_gid_list = list(set(gid_list))
        valid_imgset_id_list = list(set(imgset_id_list))
aids_list = ibs.get_imageset_aids(valid_imgset_id_list)
nids_list = map(ibs.get_annot_nids, aids_list)
nids_list = map(set, nids_list)
nids_list = ut.flatten(nids_list)
num_nid = len(valid_nid_list)
num_aid = len(valid_aid_list)
num_gid = len(valid_gid_list)
num_imgset = len(valid_imgset_id_list)
num_encounters = len(nids_list)
args = (num_aid, num_gid, num_nid, num_encounters, num_imgset, )
counts_str = fmt_str % args
    except Exception:
counts_str = ''
return counts_str
@register_route('/test/counts.jsp', methods=['GET'], __api_postfix_check__=False)
def wb_counts_alias1():
return wb_counts()
@register_route('/gzgc/counts.jsp', methods=['GET'], __api_postfix_check__=False)
def wb_counts_alias2():
return wb_counts()
@register_route('/404/', methods=['GET'])
def error404(exception=None):
import traceback
exception_str = str(exception)
traceback_str = str(traceback.format_exc())
print('[web] %r' % (exception_str, ))
print('[web] %r' % (traceback_str, ))
return appf.template(None, '404', exception_str=exception_str,
traceback_str=traceback_str)
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.web.app
python -m ibeis.web.app --allexamples
python -m ibeis.web.app --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 | -1,131,818,135,503,844,000 | 39.833217 | 275 | 0.558516 | false |
rlaboiss/pelican-plugins | post_stats/readability.py | 73 | 1362 | # -*- coding: utf-8 -*-
# Adapted from here: http://acdx.net/calculating-the-flesch-kincaid-level-in-python/
# See here for details: http://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_test
from __future__ import division
import re
def mean(seq):
return sum(seq) / len(seq)
def syllables(word):
if len(word) <= 3:
return 1
word = re.sub(r"(es|ed|(?<!l)e)$", "", word)
return len(re.findall(r"[aeiouy]+", word))
def normalize(text):
terminators = ".!?:;"
term = re.escape(terminators)
text = re.sub(r"[^%s\sA-Za-z]+" % term, "", text)
text = re.sub(r"\s*([%s]+\s*)+" % term, ". ", text)
return re.sub(r"\s+", " ", text)
def text_stats(text, wc):
text = normalize(text)
stcs = [s.split(" ") for s in text.split(". ")]
stcs = [s for s in stcs if len(s) >= 2]
if wc:
words = wc
else:
words = sum(len(s) for s in stcs)
sbls = sum(syllables(w) for s in stcs for w in s)
return len(stcs), words, sbls
def flesch_index(stats):
stcs, words, sbls = stats
if stcs == 0 or words == 0:
return 0
return 206.835 - 1.015 * (words / stcs) - 84.6 * (sbls / words)
def flesch_kincaid_level(stats):
stcs, words, sbls = stats
if stcs == 0 or words == 0:
return 0
return 0.39 * (words / stcs) + 11.8 * (sbls / words) - 15.59
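# Hedged usage sketch (not part of the original plugin): compute both scores
# for a blob of text; passing wc=None makes text_stats() count the words
# itself.
#
#     stats = text_stats("This is a sentence. Here is another one.", None)
#     print(flesch_index(stats), flesch_kincaid_level(stats))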
| agpl-3.0 | -3,312,280,517,650,598,000 | 23.321429 | 92 | 0.569016 | false |
shrimo/PyQt4 | examples/webkit/previewer/ui_previewer.py | 6 | 3467 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'previewer.ui'
#
# Created: Mon Nov 29 17:09:55 2010
# by: PyQt4 UI code generator snapshot-4.8.2-241fbaf4620d
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(911, 688)
self.horizontalLayout_4 = QtGui.QHBoxLayout(Form)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.splitter = QtGui.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.editorBox = QtGui.QGroupBox(self.splitter)
self.editorBox.setObjectName(_fromUtf8("editorBox"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.editorBox)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.plainTextEdit = QtGui.QPlainTextEdit(self.editorBox)
self.plainTextEdit.setObjectName(_fromUtf8("plainTextEdit"))
self.verticalLayout_2.addWidget(self.plainTextEdit)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.clearButton = QtGui.QPushButton(self.editorBox)
self.clearButton.setObjectName(_fromUtf8("clearButton"))
self.horizontalLayout.addWidget(self.clearButton)
self.previewButton = QtGui.QPushButton(self.editorBox)
self.previewButton.setObjectName(_fromUtf8("previewButton"))
self.horizontalLayout.addWidget(self.previewButton)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.previewerBox = QtGui.QGroupBox(self.splitter)
self.previewerBox.setObjectName(_fromUtf8("previewerBox"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.previewerBox)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.webView = QtWebKit.QWebView(self.previewerBox)
self.webView.setUrl(QtCore.QUrl(_fromUtf8("about:blank")))
self.webView.setObjectName(_fromUtf8("webView"))
self.horizontalLayout_3.addWidget(self.webView)
self.horizontalLayout_4.addWidget(self.splitter)
self.retranslateUi(Form)
QtCore.QObject.connect(self.clearButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.plainTextEdit.clear)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.editorBox.setTitle(QtGui.QApplication.translate("Form", "HTML Editor", None, QtGui.QApplication.UnicodeUTF8))
self.clearButton.setText(QtGui.QApplication.translate("Form", "Clear", None, QtGui.QApplication.UnicodeUTF8))
self.previewButton.setText(QtGui.QApplication.translate("Form", "Preview", None, QtGui.QApplication.UnicodeUTF8))
self.previewerBox.setTitle(QtGui.QApplication.translate("Form", "HTML Preview", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import QtWebKit
| gpl-2.0 | 8,707,240,979,276,832,000 | 51.530303 | 126 | 0.726565 | false |
zstyblik/infernal-twin | build/reportlab/tests/test_platypus_accum.py | 14 | 3168 | from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, LongTable
from reportlab.platypus.doctemplate import PageAccumulator
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib.utils import simpleSplit
from reportlab.lib import colors
styleSheet = getSampleStyleSheet()
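# PageAccumulator collects the values emitted via onDrawStr()/onDrawText()
# while each page renders; pageEndAction() then sees them in self.data once
# per page. The subclass below uses that to stamp pages whose row numbers
# include multiples of 13 and/or 42.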
class MyPageAccumulator(PageAccumulator):
def pageEndAction(self,canv,doc):
L42 = [x[0] for x in self.data if not x[0]%42]
L13 = [x[0] for x in self.data if not x[0]%13]
if L42 and L13:
s = 'Saw multiples of 13 and 42'
elif L13:
s = 'Saw multiples of 13'
elif L42:
s = 'Saw multiples of 42'
else:
return
canv.saveState()
canv.setFillColor(colors.purple)
canv.setFont("Helvetica",6)
canv.drawString(1*inch,1*inch,s)
canv.restoreState()
PA = MyPageAccumulator('_42_divides')
class MyDocTemplate(SimpleDocTemplate):
def beforeDocument(self):
for pt in self.pageTemplates:
PA.attachToPageTemplate(pt)
def textAccum2():
doc = MyDocTemplate(outputfile('test_platypus_accum2.pdf'),
pagesize=(8.5*inch, 11*inch), showBoundary=1)
story=[]
story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
('FONTNAME',(0,0),(-1,-1),'Helvetica'),
('FONTSIZE',(0,0),(-1,-1),10),
]
def myCV(s,fontName='Helvetica',fontSize=10,maxWidth=72):
return '\n'.join(simpleSplit(s,fontName,fontSize,maxWidth))
data = [[PA.onDrawStr(str(i+1),i+1),
myCV("xx "* (i%10),maxWidth=100-12),
myCV("blah "*(i%40),maxWidth=200-12)]
for i in range(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
story.append(t)
doc.build(story)
def textAccum1():
doc = MyDocTemplate(outputfile('test_platypus_accum1.pdf'),
pagesize=(8.5*inch, 11*inch), showBoundary=1)
story=[]
story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
]
data = [[str(i+1), Paragraph("xx "* (i%10),
styleSheet["BodyText"]),
Paragraph(("blah "*(i%40))+PA.onDrawText(i+1), styleSheet["BodyText"])]
for i in range(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
story.append(t)
doc.build(story)
class TablesTestCase(unittest.TestCase):
"Make documents with tables"
def test1(self):
textAccum1()
def test2(self):
textAccum2()
def makeSuite():
return makeSuiteForClasses(TablesTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| gpl-3.0 | -5,599,083,950,257,946,000 | 33.813187 | 92 | 0.619949 | false |
kiall/designate-py3 | tools/install_venv.py | 11 | 2341 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv # noqa
def print_help(venv, root):
help = """
Designate development environment setup is complete.
Designate development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the Designate virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
"""
print(help % (venv, root))
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if os.environ.get('tools_path'):
root = os.environ['tools_path']
venv = os.path.join(root, '.venv')
if os.environ.get('venv'):
venv = os.environ['venv']
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Designate'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | -1,269,447,356,887,836,700 | 32.927536 | 79 | 0.689876 | false |
mangalaman93/docker-py | docker/utils/types.py | 43 | 2304 | import six
class LogConfigTypesEnum(object):
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class DictType(dict):
def __init__(self, init):
for k, v in six.iteritems(init):
self[k] = v
class LogConfig(DictType):
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super(LogConfig, self).__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
self.config[key] = value
def unset_config(self, key):
if key in self.config:
del self.config[key]
class Ulimit(DictType):
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, six.string_types):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super(Ulimit, self).__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
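# Hedged usage sketch (not part of this module): both helpers serialize to
# the dict shapes the Docker API expects, e.g. when building a host config:
#
#     log_cfg = LogConfig(type=LogConfig.types.JSON,
#                         config={'max-size': '1g'})
#     nofile = Ulimit(name='nofile', soft=1024, hard=2048)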
| apache-2.0 | 7,601,276,588,095,561,000 | 23 | 69 | 0.547743 | false |
alander/StarCluster | starcluster/commands/removenode.py | 19 | 5236 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import warnings
from starcluster.logger import log
from starcluster.commands.completers import ClusterCompleter
class CmdRemoveNode(ClusterCompleter):
"""
removenode [options] <cluster_tag>
Terminate one or more nodes in the cluster
Examples:
$ starcluster removenode mycluster
This will automatically fetch a single worker node, detach it from the
cluster, and then terminate it. If you'd rather be specific about which
node(s) to remove then use the -a option:
$ starcluster removenode mycluster -a node003
You can also specify multiple nodes to remove and terminate one after
another, e.g.:
$ starcluster removenode mycluster -n 3
or
$ starcluster removenode mycluster -a node001,node002,node003
If you'd rather not terminate the node(s) after detaching from the cluster,
use the -k option:
$ starcluster removenode -k mycluster -a node001,node002,node003
This will detach the nodes from the cluster but leave the instances
running. These nodes can then later be reattached to the cluster using:
$ starcluster addnode mycluster -x -a node001,node002,node003
This can be useful, for example, when testing on_add_node and
on_remove_node methods in a StarCluster plugin.
"""
names = ['removenode', 'rn']
tag = None
def addopts(self, parser):
parser.add_option("-f", "--force", dest="force", action="store_true",
default=False, help="Terminate node regardless "
"of errors if possible ")
parser.add_option("-k", "--keep-instance", dest="terminate",
action="store_false", default=True,
help="do not terminate nodes "
"after detaching them from the cluster")
parser.add_option("-c", "--confirm", dest="confirm",
action="store_true", default=False,
help="Do not prompt for confirmation, "
"just remove the node(s)")
parser.add_option("-n", "--num-nodes", dest="num_nodes",
action="store", type="int", default=1,
help="number of nodes to remove")
parser.add_option("-a", "--aliases", dest="aliases", action="append",
type="string", default=[],
help="list of nodes to remove (e.g. "
"node001,node002,node003)")
def execute(self, args):
if not len(args) >= 1:
self.parser.error("please specify a cluster <cluster_tag>")
if len(args) >= 2:
warnings.warn(
"Passing node names as arguments is deprecated. Please "
"start using the -a option. Pass --help for more details",
DeprecationWarning)
tag = self.tag = args[0]
aliases = []
for alias in self.opts.aliases:
aliases.extend(alias.split(','))
old_form_aliases = args[1:]
if old_form_aliases:
if aliases:
self.parser.error(
"you must either use a list of nodes as arguments OR "
"use the -a option - not both")
else:
aliases = old_form_aliases
if ('master' in aliases) or ('%s-master' % tag in aliases):
self.parser.error(
"'master' and '%s-master' are reserved aliases" % tag)
num_nodes = self.opts.num_nodes
if num_nodes == 1 and aliases:
num_nodes = len(aliases)
if num_nodes > 1 and aliases and len(aliases) != num_nodes:
self.parser.error("you must specify the same number of aliases "
"(-a) as nodes (-n)")
dupe = self._get_duplicate(aliases)
if dupe:
self.parser.error("cannot have duplicate aliases (duplicate: %s)" %
dupe)
if not self.opts.confirm:
resp = raw_input("Remove %s from %s (y/n)? " %
(', '.join(aliases) or '%s nodes' % num_nodes,
tag))
if resp not in ['y', 'Y', 'yes']:
log.info("Aborting...")
return
self.cm.remove_nodes(tag, aliases=aliases, num_nodes=num_nodes,
terminate=self.opts.terminate,
force=self.opts.force)
| gpl-3.0 | 5,828,462,376,033,988,000 | 40.228346 | 79 | 0.582124 | false |
mbrugg/MC-EWIO64-ORG | board/pxa255_idp/pxa_reg_calcs.py | 65 | 11003 | #!/usr/bin/python
# (C) Copyright 2004
# BEC Systems <http://bec-systems.com>
# Cliff Brake <[email protected]>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
# calculations for PXA255 registers
class gpio:
dir = '0'
set = '0'
clr = '0'
alt = '0'
desc = ''
def __init__(self, dir=0, set=0, clr=0, alt=0, desc=''):
self.dir = dir
self.set = set
self.clr = clr
self.alt = alt
self.desc = desc
# the following is a dictionary of the alternate functions for every GPIO in
# the system; the key is the GPIO number
pxa255_alt_func = {
0: ['gpio', 'none', 'none', 'none'],
1: ['gpio', 'gpio reset', 'none', 'none'],
2: ['gpio', 'none', 'none', 'none'],
3: ['gpio', 'none', 'none', 'none'],
4: ['gpio', 'none', 'none', 'none'],
5: ['gpio', 'none', 'none', 'none'],
6: ['gpio', 'MMC clk', 'none', 'none'],
7: ['gpio', '48MHz clock', 'none', 'none'],
8: ['gpio', 'MMC CS0', 'none', 'none'],
9: ['gpio', 'MMC CS1', 'none', 'none'],
10: ['gpio', 'RTC Clock', 'none', 'none'],
11: ['gpio', '3.6MHz', 'none', 'none'],
12: ['gpio', '32KHz', 'none', 'none'],
13: ['gpio', 'none', 'MBGNT', 'none'],
14: ['gpio', 'MBREQ', 'none', 'none'],
15: ['gpio', 'none', 'nCS_1', 'none'],
16: ['gpio', 'none', 'PWM0', 'none'],
17: ['gpio', 'none', 'PWM1', 'none'],
18: ['gpio', 'RDY', 'none', 'none'],
19: ['gpio', 'DREQ[1]', 'none', 'none'],
20: ['gpio', 'DREQ[0]', 'none', 'none'],
21: ['gpio', 'none', 'none', 'none'],
22: ['gpio', 'none', 'none', 'none'],
23: ['gpio', 'none', 'SSP SCLK', 'none'],
24: ['gpio', 'none', 'SSP SFRM', 'none'],
25: ['gpio', 'none', 'SSP TXD', 'none'],
26: ['gpio', 'SSP RXD', 'none', 'none'],
27: ['gpio', 'SSP EXTCLK', 'none', 'none'],
28: ['gpio', 'AC97 bitclk in, I2S bitclock out', 'I2S bitclock in', 'none'],
29: ['gpio', 'AC97 SDATA_IN0', 'I2S SDATA_IN', 'none'],
30: ['gpio', 'I2S SDATA_OUT', 'AC97 SDATA_OUT', 'none'],
31: ['gpio', 'I2S SYNC', 'AC97 SYNC', 'none'],
32: ['gpio', 'AC97 SDATA_IN1', 'I2S SYSCLK', 'none'],
33: ['gpio', 'none', 'nCS_5', 'none'],
34: ['gpio', 'FF RXD', 'MMC CS0', 'none'],
35: ['gpio', 'FF CTS', 'none', 'none'],
36: ['gpio', 'FF DCD', 'none', 'none'],
37: ['gpio', 'FF DSR', 'none', 'none'],
38: ['gpio', 'FF RI', 'none', 'none'],
39: ['gpio', 'MMC CS1', 'FF TXD', 'none'],
40: ['gpio', 'none', 'FF DTR', 'none'],
41: ['gpio', 'none', 'FF RTS', 'none'],
42: ['gpio', 'BT RXD', 'none', 'HW RXD'],
43: ['gpio', 'none', 'BT TXD', 'HW TXD'],
44: ['gpio', 'BT CTS', 'none', 'HW CTS'],
45: ['gpio', 'none', 'BT RTS', 'HW RTS'],
46: ['gpio', 'ICP_RXD', 'STD RXD', 'none'],
47: ['gpio', 'STD TXD', 'ICP_TXD', 'none'],
48: ['gpio', 'HW TXD', 'nPOE', 'none'],
49: ['gpio', 'HW RXD', 'nPWE', 'none'],
50: ['gpio', 'HW CTS', 'nPIOR', 'none'],
51: ['gpio', 'nPIOW', 'HW RTS', 'none'],
52: ['gpio', 'none', 'nPCE[1]', 'none'],
53: ['gpio', 'MMC CLK', 'nPCE[2]', 'none'],
54: ['gpio', 'MMC CLK', 'nPSKSEL', 'none'],
55: ['gpio', 'none', 'nPREG', 'none'],
56: ['gpio', 'nPWAIT', 'none', 'none'],
57: ['gpio', 'nIOIS16', 'none', 'none'],
58: ['gpio', 'none', 'LDD[0]', 'none'],
59: ['gpio', 'none', 'LDD[1]', 'none'],
60: ['gpio', 'none', 'LDD[2]', 'none'],
61: ['gpio', 'none', 'LDD[3]', 'none'],
62: ['gpio', 'none', 'LDD[4]', 'none'],
63: ['gpio', 'none', 'LDD[5]', 'none'],
64: ['gpio', 'none', 'LDD[6]', 'none'],
65: ['gpio', 'none', 'LDD[7]', 'none'],
66: ['gpio', 'MBREQ', 'LDD[8]', 'none'],
67: ['gpio', 'MMC CS0', 'LDD[9]', 'none'],
68: ['gpio', 'MMC CS1', 'LDD[10]', 'none'],
69: ['gpio', 'MMC CLK', 'LDD[11]', 'none'],
70: ['gpio', 'RTC CLK', 'LDD[12]', 'none'],
71: ['gpio', '3.6 MHz', 'LDD[13]', 'none'],
72: ['gpio', '32 KHz', 'LDD[14]', 'none'],
73: ['gpio', 'MBGNT', 'LDD[15]', 'none'],
74: ['gpio', 'none', 'LCD_FCLK', 'none'],
75: ['gpio', 'none', 'LCD_LCLK', 'none'],
76: ['gpio', 'none', 'LCD_PCLK', 'none'],
77: ['gpio', 'none', 'LCD_ACBIAS', 'none'],
78: ['gpio', 'none', 'nCS_2', 'none'],
79: ['gpio', 'none', 'nCS_3', 'none'],
80: ['gpio', 'none', 'nCS_4', 'none'],
81: ['gpio', 'NSSPSCLK', 'none', 'none'],
82: ['gpio', 'NSSPSFRM', 'none', 'none'],
83: ['gpio', 'NSSPTXD', 'NSSPRXD', 'none'],
84: ['gpio', 'NSSPTXD', 'NSSPRXD', 'none'],
}
#def __init__(self, dir=0, set=0, clr=0, alt=0, desc=''):
gpio_list = []
for i in range(0,85):
gpio_list.append(gpio())
#chip select GPIOs
gpio_list[18] = gpio(0, 0, 0, 1, 'RDY')
gpio_list[33] = gpio(1, 1, 0, 2, 'CS5#')
gpio_list[80] = gpio(1, 1, 0, 2, 'CS4#')
gpio_list[79] = gpio(1, 1, 0, 2, 'CS3#')
gpio_list[78] = gpio(1, 1, 0, 2, 'CS2#')
gpio_list[15] = gpio(1, 1, 0, 2, 'CS1#')
gpio_list[22] = gpio(0, 0, 0, 0, 'Consumer IR, PCC_S1_IRQ_O#')
gpio_list[21] = gpio(0, 0, 0, 0, 'IRQ_IDE, PFI')
gpio_list[19] = gpio(0, 0, 0, 0, 'XB_DREQ1, PCC_SO_IRQ_O#')
gpio_list[20] = gpio(0, 0, 0, 0, 'XB_DREQ0')
gpio_list[20] = gpio(0, 0, 0, 0, 'XB_DREQ0')
gpio_list[17] = gpio(0, 0, 0, 0, 'IRQ_AXB')
gpio_list[16] = gpio(1, 0, 0, 2, 'PWM0')
# PCMCIA stuff
gpio_list[57] = gpio(0, 0, 0, 1, 'PCC_IOIS16#')
gpio_list[56] = gpio(0, 0, 0, 1, 'PCC_WAIT#')
gpio_list[55] = gpio(1, 0, 0, 2, 'PCC_REG#')
gpio_list[54] = gpio(1, 0, 0, 2, 'PCC_SCKSEL')
gpio_list[53] = gpio(1, 1, 0, 2, 'PCC_CE2#')
gpio_list[52] = gpio(1, 1, 0, 2, 'PCC_CE1#')
gpio_list[51] = gpio(1, 1, 0, 1, 'PCC_IOW#')
gpio_list[50] = gpio(1, 1, 0, 2, 'PCC_IOR#')
gpio_list[49] = gpio(1, 1, 0, 2, 'PCC_WE#')
gpio_list[48] = gpio(1, 1, 0, 2, 'PCC_OE#')
# SSP port
gpio_list[26] = gpio(0, 0, 0, 1, 'SSP_RXD')
gpio_list[25] = gpio(0, 0, 0, 0, 'SSP_TXD')
gpio_list[24] = gpio(1, 0, 1, 2, 'SSP_SFRM')
gpio_list[23] = gpio(1, 0, 1, 2, 'SSP_SCLK')
gpio_list[27] = gpio(0, 0, 0, 0, 'SSP_EXTCLK')
# audio codec
gpio_list[32] = gpio(0, 0, 0, 0, 'AUD_SDIN1')
gpio_list[31] = gpio(1, 0, 0, 2, 'AC_SYNC')
gpio_list[30] = gpio(1, 0, 0, 2, 'AC_SDOUT')
gpio_list[29] = gpio(0, 0, 0, 1, 'AUD_SDIN0')
gpio_list[28] = gpio(0, 0, 0, 1, 'AC_BITCLK')
# serial ports
gpio_list[39] = gpio(1, 0, 0, 2, 'FF_TXD')
gpio_list[34] = gpio(0, 0, 0, 1, 'FF_RXD')
gpio_list[41] = gpio(1, 0, 0, 2, 'FF_RTS')
gpio_list[35] = gpio(0, 0, 0, 1, 'FF_CTS')
gpio_list[40] = gpio(1, 0, 0, 2, 'FF_DTR')
gpio_list[37] = gpio(0, 0, 0, 1, 'FF_DSR')
gpio_list[38] = gpio(0, 0, 0, 1, 'FF_RI')
gpio_list[36] = gpio(0, 0, 0, 1, 'FF_DCD')
gpio_list[43] = gpio(1, 0, 0, 2, 'BT_TXD')
gpio_list[42] = gpio(0, 0, 0, 1, 'BT_RXD')
gpio_list[45] = gpio(1, 0, 0, 2, 'BT_RTS')
gpio_list[44] = gpio(0, 0, 0, 1, 'BT_CTS')
gpio_list[47] = gpio(1, 0, 0, 1, 'IR_TXD')
gpio_list[46] = gpio(0, 0, 0, 2, 'IR_RXD')
# misc GPIO signals
gpio_list[14] = gpio(0, 0, 0, 0, 'MBREQ')
gpio_list[13] = gpio(0, 0, 0, 0, 'MBGNT')
gpio_list[12] = gpio(0, 0, 0, 0, 'GPIO_12/32K_CLK')
gpio_list[11] = gpio(0, 0, 0, 0, '3M6_CLK')
gpio_list[10] = gpio(1, 0, 1, 0, 'GPIO_10/RTC_CLK/debug LED')
gpio_list[9] = gpio(0, 0, 0, 0, 'MMC_CD#')
gpio_list[8] = gpio(0, 0, 0, 0, 'PCC_S1_CD#')
gpio_list[7] = gpio(0, 0, 0, 0, 'PCC_S0_CD#')
gpio_list[6] = gpio(1, 0, 0, 1, 'MMC_CLK')
gpio_list[5] = gpio(0, 0, 0, 0, 'IRQ_TOUCH#')
gpio_list[4] = gpio(0, 0, 0, 0, 'IRQ_ETH')
gpio_list[3] = gpio(0, 0, 0, 0, 'MQ_IRQ#')
gpio_list[2] = gpio(0, 0, 0, 0, 'BAT_DATA')
gpio_list[1] = gpio(0, 0, 0, 1, 'USER_RESET#')
gpio_list[0] = gpio(0, 0, 0, 1, 'USER_RESET#')
# LCD GPIOs
gpio_list[58] = gpio(1, 0, 0, 2, 'LDD0')
gpio_list[59] = gpio(1, 0, 0, 2, 'LDD1')
gpio_list[60] = gpio(1, 0, 0, 2, 'LDD2')
gpio_list[61] = gpio(1, 0, 0, 2, 'LDD3')
gpio_list[62] = gpio(1, 0, 0, 2, 'LDD4')
gpio_list[63] = gpio(1, 0, 0, 2, 'LDD5')
gpio_list[64] = gpio(1, 0, 0, 2, 'LDD6')
gpio_list[65] = gpio(1, 0, 0, 2, 'LDD7')
gpio_list[66] = gpio(1, 0, 0, 2, 'LDD8')
gpio_list[67] = gpio(1, 0, 0, 2, 'LDD9')
gpio_list[68] = gpio(1, 0, 0, 2, 'LDD10')
gpio_list[69] = gpio(1, 0, 0, 2, 'LDD11')
gpio_list[70] = gpio(1, 0, 0, 2, 'LDD12')
gpio_list[71] = gpio(1, 0, 0, 2, 'LDD13')
gpio_list[72] = gpio(1, 0, 0, 2, 'LDD14')
gpio_list[73] = gpio(1, 0, 0, 2, 'LDD15')
gpio_list[74] = gpio(1, 0, 0, 2, 'FCLK')
gpio_list[75] = gpio(1, 0, 0, 2, 'LCLK')
gpio_list[76] = gpio(1, 0, 0, 2, 'PCLK')
gpio_list[77] = gpio(1, 0, 0, 2, 'ACBIAS')
# calculate registers
pxa_regs = {
'gpdr0':0, 'gpdr1':0, 'gpdr2':0,
'gpsr0':0, 'gpsr1':0, 'gpsr2':0,
'gpcr0':0, 'gpcr1':0, 'gpcr2':0,
'gafr0_l':0, 'gafr0_u':0,
'gafr1_l':0, 'gafr1_u':0,
'gafr2_l':0, 'gafr2_u':0,
}
# U-boot define names
uboot_reg_names = {
'gpdr0':'CFG_GPDR0_VAL', 'gpdr1':'CFG_GPDR1_VAL', 'gpdr2':'CFG_GPDR2_VAL',
'gpsr0':'CFG_GPSR0_VAL', 'gpsr1':'CFG_GPSR1_VAL', 'gpsr2':'CFG_GPSR2_VAL',
'gpcr0':'CFG_GPCR0_VAL', 'gpcr1':'CFG_GPCR1_VAL', 'gpcr2':'CFG_GPCR2_VAL',
'gafr0_l':'CFG_GAFR0_L_VAL', 'gafr0_u':'CFG_GAFR0_U_VAL',
'gafr1_l':'CFG_GAFR1_L_VAL', 'gafr1_u':'CFG_GAFR1_U_VAL',
'gafr2_l':'CFG_GAFR2_L_VAL', 'gafr2_u':'CFG_GAFR2_U_VAL',
}
# bit mappings
bit_mappings = [
{ 'gpio':(0,32), 'shift':1, 'regs':{'dir':'gpdr0', 'set':'gpsr0', 'clr':'gpcr0'} },
{ 'gpio':(32,64), 'shift':1, 'regs':{'dir':'gpdr1', 'set':'gpsr1', 'clr':'gpcr1'} },
{ 'gpio':(64,85), 'shift':1, 'regs':{'dir':'gpdr2', 'set':'gpsr2', 'clr':'gpcr2'} },
{ 'gpio':(0,16), 'shift':2, 'regs':{'alt':'gafr0_l'} },
{ 'gpio':(16,32), 'shift':2, 'regs':{'alt':'gafr0_u'} },
{ 'gpio':(32,48), 'shift':2, 'regs':{'alt':'gafr1_l'} },
{ 'gpio':(48,64), 'shift':2, 'regs':{'alt':'gafr1_u'} },
{ 'gpio':(64,80), 'shift':2, 'regs':{'alt':'gafr2_l'} },
{ 'gpio':(80,85), 'shift':2, 'regs':{'alt':'gafr2_u'} },
]
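# Worked example (illustrative): GPIO 33 (dir=1, set=1, alt=2) falls in the
# (32, 64) mapping, so bit (33-32)*1 = 1 is set in both GPDR1 and GPSR1; in
# the (32, 48) 'gafr1_l' mapping its alt value lands at bits 3:2, i.e. 2 << 2.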
def stuff_bits(bit_mapping, gpio_list):
gpios = range( bit_mapping['gpio'][0], bit_mapping['gpio'][1])
for gpio in gpios:
for reg in bit_mapping['regs'].keys():
value = eval( 'gpio_list[gpio].%s' % (reg) )
if ( value ):
# we have a high bit
bit_shift = (gpio - bit_mapping['gpio'][0]) * bit_mapping['shift']
bit = value << (bit_shift)
pxa_regs[bit_mapping['regs'][reg]] |= bit
for i in bit_mappings:
stuff_bits(i, gpio_list)
# now print out all regs
registers = pxa_regs.keys()
registers.sort()
for reg in registers:
print '%s: 0x%x' % (reg, pxa_regs[reg])
# print defines to paste right into U-Boot source code
print
print
for reg in registers:
print '#define %s 0x%x' % (uboot_reg_names[reg], pxa_regs[reg])
# print all GPIOs
print
print
for i in range(len(gpio_list)):
gpio_i = gpio_list[i]
alt_func_desc = pxa255_alt_func[i][gpio_i.alt]
print 'GPIO: %i, dir=%i, set=%i, clr=%i, alt=%s, desc=%s' % (i, gpio_i.dir, gpio_i.set, gpio_i.clr, alt_func_desc, gpio_i.desc)
| gpl-2.0 | 6,184,264,800,727,650,000 | 34.379421 | 128 | 0.549487 | false |
cgcgbcbc/django-xadmin | xadmin/plugins/themes.py | 25 | 2797 | #coding:utf-8
import urllib
from django.template import loader
from django.core.cache import cache
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.models import UserSettings
from xadmin.views import BaseAdminPlugin, BaseAdminView
from xadmin.util import static, json
THEME_CACHE_KEY = 'xadmin_themes'
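# Remote bootswatch theme descriptors are memoized in Django's cache under
# this key for 24 hours (see block_top_navmenu below).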
class ThemePlugin(BaseAdminPlugin):
enable_themes = False
# {'name': 'Blank Theme', 'description': '...', 'css': 'http://...', 'thumbnail': '...'}
user_themes = None
use_bootswatch = False
default_theme = static('xadmin/css/themes/bootstrap-xadmin.css')
bootstrap2_theme = static('xadmin/css/themes/bootstrap-theme.css')
def init_request(self, *args, **kwargs):
return self.enable_themes
def _get_theme(self):
if self.user:
try:
return UserSettings.objects.get(user=self.user, key="site-theme").value
except Exception:
pass
if '_theme' in self.request.COOKIES:
return urllib.unquote(self.request.COOKIES['_theme'])
return self.default_theme
def get_context(self, context):
context['site_theme'] = self._get_theme()
return context
# Media
def get_media(self, media):
return media + self.vendor('jquery-ui-effect.js', 'xadmin.plugin.themes.js')
# Block Views
def block_top_navmenu(self, context, nodes):
themes = [{'name': _(u"Default"), 'description': _(
u"Default bootstrap theme"), 'css': self.default_theme},
{'name': _(u"Bootstrap2"), 'description': _(u"Bootstrap 2.x theme"),
'css': self.bootstrap2_theme}]
select_css = context.get('site_theme', self.default_theme)
if self.user_themes:
themes.extend(self.user_themes)
if self.use_bootswatch:
ex_themes = cache.get(THEME_CACHE_KEY)
if ex_themes:
themes.extend(json.loads(ex_themes))
else:
ex_themes = []
try:
watch_themes = json.loads(urllib.urlopen(
'http://api.bootswatch.com/3/').read())['themes']
ex_themes.extend([
{'name': t['name'], 'description': t['description'],
'css': t['cssMin'], 'thumbnail': t['thumbnail']}
for t in watch_themes])
except Exception:
pass
cache.set(THEME_CACHE_KEY, json.dumps(ex_themes), 24 * 3600)
themes.extend(ex_themes)
nodes.append(loader.render_to_string('xadmin/blocks/comm.top.theme.html', {'themes': themes, 'select_css': select_css}))
site.register_plugin(ThemePlugin, BaseAdminView)
| bsd-3-clause | -5,510,038,210,642,784,000 | 34.858974 | 128 | 0.5867 | false |
myerpengine/odoo | openerp/modules/migration.py | 64 | 7546 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules migration handling. """
import imp
import logging
import os
from os.path import join as opj
import openerp
import openerp.release as release
import openerp.tools as tools
from openerp.tools.parse_version import parse_version
_logger = logging.getLogger(__name__)
class MigrationManager(object):
"""
This class manage the migration of modules
Migrations files must be python files containing a "migrate(cr, installed_version)" function.
Theses files must respect a directory tree structure: A 'migrations' folder which containt a
folder by version. Version can be 'module' version or 'server.module' version (in this case,
the files will only be processed by this version of the server). Python file names must start
by 'pre' or 'post' and will be executed, respectively, before and after the module initialisation
Example:
<moduledir>
`-- migrations
|-- 1.0
| |-- pre-update_table_x.py
| |-- pre-update_table_y.py
| |-- post-clean-data.py
| `-- README.txt # not processed
|-- 5.0.1.1 # files in this folder will be executed only on a 5.0 server
| |-- pre-delete_table_z.py
| `-- post-clean-data.py
`-- foo.py # not processed
    A similar structure is generated by the maintenance module with the migration files fetched
    through the maintenance contract.
"""
def __init__(self, cr, graph):
self.cr = cr
self.graph = graph
self.migrations = {}
self._get_files()
def _get_files(self):
"""
import addons.base.maintenance.utils as maintenance_utils
maintenance_utils.update_migrations_files(self.cr)
#"""
for pkg in self.graph:
self.migrations[pkg.name] = {}
if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade'):
continue
get_module_filetree = openerp.modules.module.get_module_filetree
self.migrations[pkg.name]['module'] = get_module_filetree(pkg.name, 'migrations') or {}
self.migrations[pkg.name]['maintenance'] = get_module_filetree('base', 'maintenance/migrations/' + pkg.name) or {}
def migrate_module(self, pkg, stage):
assert stage in ('pre', 'post')
stageformat = {
'pre': '[>%s]',
'post': '[%s>]',
}
if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade') or pkg.installed_version is None:
return
def convert_version(version):
if version.count('.') >= 2:
                return version  # the version number already contains the server version
return "%s.%s" % (release.major_version, version)
def _get_migration_versions(pkg):
def __get_dir(tree):
return [d for d in tree if tree[d] is not None]
versions = list(set(
__get_dir(self.migrations[pkg.name]['module']) +
__get_dir(self.migrations[pkg.name]['maintenance'])
))
versions.sort(key=lambda k: parse_version(convert_version(k)))
return versions
def _get_migration_files(pkg, version, stage):
""" return a list of tuple (module, file)
"""
m = self.migrations[pkg.name]
lst = []
mapping = {
'module': opj(pkg.name, 'migrations'),
'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
}
for x in mapping.keys():
if version in m[x]:
for f in m[x][version]:
if m[x][version][f] is not None:
continue
if not f.startswith(stage + '-'):
continue
lst.append(opj(mapping[x], version, f))
lst.sort()
return lst
def mergedict(a, b):
a = a.copy()
a.update(b)
return a
parsed_installed_version = parse_version(pkg.installed_version or '')
current_version = parse_version(convert_version(pkg.data['version']))
versions = _get_migration_versions(pkg)
for version in versions:
if parsed_installed_version < parse_version(convert_version(version)) <= current_version:
strfmt = {'addon': pkg.name,
'stage': stage,
'version': stageformat[stage] % version,
}
for pyfile in _get_migration_files(pkg, version, stage):
name, ext = os.path.splitext(os.path.basename(pyfile))
if ext.lower() != '.py':
continue
mod = fp = fp2 = None
try:
fp = tools.file_open(pyfile)
# imp.load_source need a real file object, so we create
# one from the file-like object we get from file_open
fp2 = os.tmpfile()
fp2.write(fp.read())
fp2.seek(0)
try:
mod = imp.load_source(name, pyfile, fp2)
_logger.info('module %(addon)s: Running migration %(version)s %(name)s' % mergedict({'name': mod.__name__}, strfmt))
migrate = mod.migrate
except ImportError:
_logger.exception('module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % mergedict({'file': pyfile}, strfmt))
raise
except AttributeError:
_logger.error('module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
else:
migrate(self.cr, pkg.installed_version)
finally:
if fp:
fp.close()
if fp2:
fp2.close()
if mod:
del mod
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,573,357,705,919,711,000 | 39.789189 | 155 | 0.518553 | false |
dstroppa/openstack-smartos-nova-grizzly | nova/tests/test_test.py | 25 | 1731 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the testing base code."""
from oslo.config import cfg
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
class IsolationTestCase(test.TestCase):
"""Ensure that things are cleaned up after failed tests.
These tests don't really do much here, but if isolation fails a bunch
of other tests should fail.
"""
def test_service_isolation(self):
self.flags(use_local=True, group='conductor')
self.useFixture(test.ServiceFixture('compute'))
def test_rpc_consumer_isolation(self):
class NeverCalled(object):
def __getattribute__(*args):
assert False, "I should never get called."
connection = rpc.create_connection(new=True)
proxy = NeverCalled()
connection.create_consumer('compute', proxy, fanout=False)
connection.consume_in_thread()
| apache-2.0 | -7,932,088,463,272,151,000 | 32.941176 | 78 | 0.707106 | false |
andmos/ansible | lib/ansible/modules/cloud/google/gcp_sql_database.py | 9 | 9612 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_sql_database
description:
- Represents a SQL database inside the Cloud SQL instance, hosted in Google's cloud.
short_description: Creates a GCP Database
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
charset:
description:
- The MySQL charset value.
required: false
collation:
description:
- The MySQL collation value.
required: false
name:
description:
- The name of the database in the Cloud SQL instance.
- This does not include the project ID or instance name.
required: false
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
    - 'This field represents a link to an Instance resource in GCP. It can be specified
      in two ways. First, you can place the name of the resource here as a string.
      Alternatively, you can add `register: name-of-resource` to a gcp_sql_instance
      task and then set this instance field to "{{ name-of-resource }}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a instance
gcp_sql_instance:
name: "{{resource_name}}-3"
settings:
ip_configuration:
authorized_networks:
- name: google dns server
value: 8.8.8.8/32
tier: db-n1-standard-1
region: us-central1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instance
- name: create a database
gcp_sql_database:
name: "test_object"
charset: utf8
instance: "{{ instance }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
charset:
description:
- The MySQL charset value.
returned: success
type: str
collation:
description:
- The MySQL collation value.
returned: success
type: str
name:
description:
- The name of the database in the Cloud SQL instance.
- This does not include the project ID or instance name.
returned: success
type: str
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
charset=dict(type='str'),
collation=dict(type='str'),
name=dict(type='str'),
instance=dict(required=True),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin']
state = module.params['state']
kind = 'sql#database'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'sql')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'sql')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'sql')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'sql#database',
u'charset': module.params.get('charset'),
u'collation': module.params.get('collation'),
u'name': module.params.get('name'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'sql')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name'), 'name': module.params['name']}
return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases/{name}".format(**res)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases".format(**res)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
# SQL only: return on 403 if not exist
if allow_not_found and response.status_code == 403:
return None
try:
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {u'charset': response.get(u'charset'), u'collation': response.get(u'collation'), u'name': response.get(u'name')}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/sql/v1beta4/projects/{project}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
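# Cloud SQL mutations return a long-running operation; the helpers below poll
# its URI once per second until status == 'DONE', then re-fetch the resource
# from the operation's targetLink.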
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'sql#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'sql#database')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'sql#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
| gpl-3.0 | -193,369,060,651,039,100 | 29.807692 | 148 | 0.596442 | false |
Jannes123/inasafe | safe/impact_functions/inundation/flood_polygon_population/test/test_flood_polygon_population.py | 2 | 3475 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Flood Vector on Population Test Cases.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Rizky Maulana Nugraha'
__date__ = '20/03/2015'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
import numpy
from safe.storage.core import read_layer
from safe.impact_functions.impact_function_manager \
import ImpactFunctionManager
from safe.test.utilities import get_qgis_app, test_data_path
from safe.impact_functions.inundation.flood_polygon_population\
.impact_function import FloodEvacuationVectorHazardFunction
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class TestFloodEvacuationVectorHazardFunction(unittest.TestCase):
"""Test for Flood Vector Building Impact Function."""
def setUp(self):
registry = ImpactFunctionManager().registry
registry.clear()
registry.register(FloodEvacuationVectorHazardFunction)
def test_run(self):
function = FloodEvacuationVectorHazardFunction.instance()
hazard_path = test_data_path('hazard', 'flood_multipart_polygons.shp')
exposure_path = test_data_path(
'exposure', 'pop_binary_raster_20_20.asc')
hazard_layer = read_layer(hazard_path)
exposure_layer = read_layer(exposure_path)
function.hazard = hazard_layer
function.exposure = exposure_layer
function.parameters['affected_field'] = 'FLOODPRONE'
function.parameters['affected_value'] = 'YES'
function.run()
impact = function.impact
keywords = impact.get_keywords()
# print "keywords", keywords
affected_population = numpy.nansum(impact.get_data())
total_population = keywords['total_population']
self.assertEqual(affected_population, 20)
self.assertEqual(total_population, 200)
def test_filter(self):
"""Test filtering IF from layer keywords"""
hazard_keywords = {
'layer_purpose': 'hazard',
'layer_mode': 'classified',
'layer_geometry': 'polygon',
'hazard': 'flood',
'hazard_category': 'single_event',
'vector_hazard_classification': 'flood_vector_hazard_classes'
}
exposure_keywords = {
'layer_purpose': 'exposure',
'layer_mode': 'continuous',
'layer_geometry': 'raster',
'exposure': 'population',
'exposure_unit': 'count'
}
impact_functions = ImpactFunctionManager().filter_by_keywords(
hazard_keywords, exposure_keywords)
message = 'There should be 1 impact function, but there are: %s' % \
len(impact_functions)
self.assertEqual(1, len(impact_functions), message)
retrieved_if = impact_functions[0].metadata().as_dict()['id']
expected = ImpactFunctionManager().get_function_id(
FloodEvacuationVectorHazardFunction)
message = 'Expecting %s, but getting %s instead' % (
expected, retrieved_if)
self.assertEqual(expected, retrieved_if, message)
| gpl-3.0 | -1,906,181,947,858,958,000 | 35.578947 | 78 | 0.656115 | false |
gautam1858/tensorflow | tensorflow/python/framework/subscribe_test.py | 22 | 13362 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
@test_util.run_deprecated_v1
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c0 = c
self.assertTrue(c0.op in d.op.control_inputs)
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Verify that control dependencies are correctly moved to the subscription.
self.assertFalse(c0.op in d.op.control_inputs)
self.assertTrue(c.op in d.op.control_inputs)
with self.cached_session() as sess:
c_out = self.evaluate([c])
n_out = self.evaluate([n])
d_out = self.evaluate([d])
self.assertEqual(n_out, [-2])
self.assertEqual(c_out, [2])
self.assertEqual(d_out, [42])
self.assertEqual(shared, [2, 2, 2])
@test_util.run_deprecated_v1
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegexp(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
@test_util.run_deprecated_v1
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
# If outdated cached control_outputs were used, evaluating would not
# trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
c_out = self.evaluate([c])
d_out = self.evaluate([d])
self.assertEqual(c_out, [42])
self.assertEqual(d_out, [11])
self.assertEqual(shared, {2: 1, 1: 1})
@test_util.run_deprecated_v1
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
@test_util.run_deprecated_v1
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
# Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEqual(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.cached_session() as sess:
self.evaluate([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
@test_util.run_v1_only('b/120545219')
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.VariableV1(0.0)
v2 = variables.VariableV1(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.cached_session() as sess:
# Initialize the variables first.
self.evaluate([v1.initializer])
self.evaluate([v2.initializer])
# Expect the side effects to be triggered when evaluating the add op as
# it will read the value of the variable.
self.evaluate([add])
self.assertEqual(1, len(shared))
# Expect the side effect not to be triggered when evaluating the assign
# op as it will not access the 'read' output of the variable.
self.evaluate([assign_v1])
self.assertEqual(1, len(shared))
self.evaluate([add])
self.assertEqual(2, len(shared))
# Make sure the values read from the variable match the expected ones.
self.assertEqual([0.0, 3.0], shared)
@test_util.run_v1_only('b/120545219')
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name='test',
size=3,
infer_shape=False)
writer = tensor_array.write(0, [[4.0, 5.0]])
reader = writer.read(0)
shared = []
def sub(t):
shared.append(t)
return t
# TensorArray's handle output tensor has a 'resource' type and cannot be
# subscribed as it's not 'numpy compatible' (see dtypes.py).
# Expect that the original tensor is returned when subscribing to it.
tensor_array_sub = subscribe.subscribe(
tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIs(tensor_array_sub, tensor_array.handle)
self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))
with self.cached_session() as sess:
self.evaluate([reader])
self.assertEqual(0, len(shared))
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
"""Handle subscriptions to multiple outputs from the same op."""
sparse_tensor_1 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
sparse_tensor_2 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])
# This op has three outputs.
sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)
self.assertEqual(3, len(sparse_add.op.outputs))
c1 = constant_op.constant(1)
with ops.control_dependencies(sparse_add.op.outputs):
# This op depends on all the three outputs.
neg = -c1
shared = []
def sub(t):
shared.append(t)
return t
# Subscribe the three outputs at once.
subscribe.subscribe(sparse_add.op.outputs,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
self.evaluate([neg])
# All three ops have been processed.
self.assertEqual(3, len(shared))
@test_util.run_deprecated_v1
def test_subscribe_tensors_on_different_devices(self):
"""Side effect ops are added with the same device of the subscribed op."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
with ops.device('cpu:0'):
add = math_ops.add(c1, c2)
with ops.device('cpu:1'):
mul = math_ops.multiply(c1, c2)
def sub(t):
return t
add_sub = subscribe.subscribe(
add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
mul_sub = subscribe.subscribe(
mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Expect the identity tensors injected by subscribe to have been created
# on the same device as their original tensors.
self.assertNotEqual(add_sub.device, mul_sub.device)
self.assertEqual(add.device, add_sub.device)
self.assertEqual(mul.device, mul_sub.device)
@test_util.run_v1_only('b/120545219')
def test_subscribe_tensors_within_control_flow_context(self):
"""Side effect ops are added with the same control flow context."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
x1 = math_ops.add(c1, c2)
x2 = math_ops.multiply(c1, c2)
cond = control_flow_ops.cond(
x1 < x2,
lambda: math_ops.add(c1, c2, name='then'),
lambda: math_ops.subtract(c1, c2, name='else'),
name='cond')
branch = ops.get_default_graph().get_tensor_by_name('cond/then:0')
def context(tensor):
return tensor.op._get_control_flow_context()
self.assertIs(context(x1), context(x2))
self.assertIsNot(context(x1), context(branch))
results = []
def sub(tensor):
results.append(tensor)
return tensor
tensors = [x1, branch, x2]
subscriptions = subscribe.subscribe(
tensors, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
for tensor, subscription in zip(tensors, subscriptions):
self.assertIs(context(tensor), context(subscription))
# Verify that sub(x1) and sub(x2) are in the same context.
self.assertIs(context(subscriptions[0]), context(subscriptions[2]))
# Verify that sub(x1) and sub(branch) are not.
self.assertIsNot(context(subscriptions[0]), context(subscriptions[1]))
with self.cached_session() as sess:
self.evaluate(cond)
self.assertEqual(3, len(results))
if __name__ == '__main__':
googletest.main()
| apache-2.0 | -1,539,091,892,599,012,000 | 32.913706 | 80 | 0.661428 | false |
j0lly/molecule | test/unit/verifier/test_ansible_lint.py | 3 | 1797 | # Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
import sh
from molecule.verifier import ansible_lint
@pytest.fixture()
def ansible_lint_instance(molecule_instance):
return ansible_lint.AnsibleLint(molecule_instance)
def test_execute(monkeypatch, patched_run_command, ansible_lint_instance):
monkeypatch.setenv('HOME', '/foo/bar')
ansible_lint_instance.execute()
parts = pytest.helpers.os_split(ansible_lint_instance._playbook)
assert 'playbook_data.yml' == parts[-1]
x = sh.ansible_lint.bake(ansible_lint_instance._playbook, '--exclude .git',
'--exclude .vagrant', '--exclude .molecule')
patched_run_command.assert_called_once_with(x, debug=None)
| mit | 3,209,938,926,068,100,000 | 42.829268 | 79 | 0.742905 | false |
glove747/liberty-neutron | neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py | 32 | 2545 | # Copyright 2015 Huawei Technologies India Pvt Ltd, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""qos db changes
Revision ID: 48153cb5f051
Revises: 1b4c6e320f79
Create Date: 2015-06-24 17:03:34.965101
"""
# revision identifiers, used by Alembic.
revision = '48153cb5f051'
down_revision = '1b4c6e320f79'
from alembic import op
import sqlalchemy as sa
from neutron.api.v2 import attributes as attrs
def upgrade():
op.create_table(
'qos_policies',
sa.Column('id', sa.String(length=36), primary_key=True),
sa.Column('name', sa.String(length=attrs.NAME_MAX_LEN)),
sa.Column('description', sa.String(length=attrs.DESCRIPTION_MAX_LEN)),
sa.Column('shared', sa.Boolean(), nullable=False),
sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN),
index=True))
op.create_table(
'qos_network_policy_bindings',
sa.Column('policy_id', sa.String(length=36),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('network_id', sa.String(length=36),
sa.ForeignKey('networks.id', ondelete='CASCADE'),
nullable=False, unique=True))
op.create_table(
'qos_port_policy_bindings',
sa.Column('policy_id', sa.String(length=36),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('port_id', sa.String(length=36),
sa.ForeignKey('ports.id', ondelete='CASCADE'),
nullable=False, unique=True))
op.create_table(
'qos_bandwidth_limit_rules',
sa.Column('id', sa.String(length=36), primary_key=True),
sa.Column('qos_policy_id', sa.String(length=36),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False, unique=True),
sa.Column('max_kbps', sa.Integer()),
sa.Column('max_burst_kbps', sa.Integer()))
| apache-2.0 | -347,799,548,453,589,100 | 35.884058 | 78 | 0.63222 | false |
Zentyal/samba | source4/scripting/devel/repl_cleartext_pwd.py | 43 | 16362 | #!/usr/bin/env python
#
# Copyright Stefan Metzmacher 2011-2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This is useful to sync passwords from an AD domain.
#
# $
# $ source4/scripting/devel/repl_cleartext_pwd.py \
# -Uadministrator%A1b2C3d4 \
# 172.31.9.219 DC=bla,DC=base /tmp/cookie cleartext_utf8 131085 displayName
# # starting at usn[0]
# dn: CN=Test User1,CN=Users,DC=bla,DC=base
# cleartext_utf8: A1b2C3d4
# displayName:: VABlAHMAdAAgAFUAcwBlAHIAMQA=
#
# # up to usn[16449]
# $
# $ source4/scripting/devel/repl_cleartext_pwd.py \
# -Uadministrator%A1b2C3d4
# 172.31.9.219 DC=bla,DC=base cookie_file cleartext_utf8 131085 displayName
# # starting at usn[16449]
# # up to usn[16449]
# $
#
import sys
# Find the right directory when running from the source tree
sys.path.insert(0, "bin/python")
import samba.getopt as options
from optparse import OptionParser
from samba.dcerpc import drsuapi, drsblobs, misc
from samba.ndr import ndr_pack, ndr_unpack, ndr_print
import binascii
import hashlib
import Crypto.Cipher.ARC4
import struct
import os
from ldif import LDIFWriter
class globals:
def __init__(self):
self.global_objs = {}
self.ldif = LDIFWriter(sys.stdout)
def add_attr(self, dn, attname, vals):
if dn not in self.global_objs:
self.global_objs[dn] = {}
self.global_objs[dn][attname] = vals
def print_all(self):
for dn, obj in self.global_objs.items():
self.ldif.unparse(dn, obj)
continue
self.global_objs = {}
def attid_equal(a1,a2):
return (a1 & 0xffffffff) == (a2 & 0xffffffff)
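# Only the low 32 bits identify the attribute, so e.g.
# attid_equal(0x100020013, 0x00020013) is True (illustrative values).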
########### main code ###########
if __name__ == "__main__":
parser = OptionParser("repl_cleartext_pwd.py [options] server dn cookie_file clear_utf8_name [attid attname attmode] [clear_utf16_name]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
(opts, args) = parser.parse_args()
if len(args) == 4:
pass
elif len(args) == 7:
pass
elif len(args) >= 8:
pass
else:
parser.error("more arguments required - given=%d" % (len(args)))
server = args[0]
dn = args[1]
cookie_file = args[2]
if len(cookie_file) == 0:
cookie_file = None
clear_utf8_name = args[3]
if len(args) >= 7:
try:
attid = int(args[4], 16)
except Exception:
attid = int(args[4])
attname = args[5]
attmode = args[6]
if attmode not in ["raw", "utf8"]:
parser.error("attmode should be 'raw' or 'utf8'")
else:
attid = -1
attname = None
attmode = "raw"
if len(args) >= 8:
clear_utf16_name = args[7]
else:
clear_utf16_name = None
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
if not creds.authentication_requested():
parser.error("You must supply credentials")
gls = globals()
try:
f = open(cookie_file, 'r')
store_blob = f.read()
f.close()
store_hdr = store_blob[0:28]
(store_version, \
store_dn_len, store_dn_ofs, \
store_hwm_len, store_hwm_ofs, \
store_utdv_len, store_utdv_ofs) = \
struct.unpack("<LLLLLLL", store_hdr)
store_dn = store_blob[store_dn_ofs:store_dn_ofs+store_dn_len]
store_hwm_blob = store_blob[store_hwm_ofs:store_hwm_ofs+store_hwm_len]
store_utdv_blob = store_blob[store_utdv_ofs:store_utdv_ofs+store_utdv_len]
store_hwm = ndr_unpack(drsuapi.DsReplicaHighWaterMark, store_hwm_blob)
store_utdv = ndr_unpack(drsblobs.replUpToDateVectorBlob, store_utdv_blob)
assert store_dn == dn
#print "%s" % ndr_print(store_hwm)
#print "%s" % ndr_print(store_utdv)
except Exception:
store_dn = dn
store_hwm = drsuapi.DsReplicaHighWaterMark()
store_hwm.tmp_highest_usn = 0
store_hwm.reserved_usn = 0
store_hwm.highest_usn = 0
store_utdv = None
binding_str = "ncacn_ip_tcp:%s[spnego,seal]" % server
drs_conn = drsuapi.drsuapi(binding_str, lp, creds)
bind_info = drsuapi.DsBindInfoCtr()
bind_info.length = 28
bind_info.info = drsuapi.DsBindInfo28()
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_BASE
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT
(info, drs_handle) = drs_conn.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
null_guid = misc.GUID()
naming_context = drsuapi.DsReplicaObjectIdentifier()
naming_context.dn = dn
highwatermark = store_hwm
uptodateness_vector = None
if store_utdv is not None:
uptodateness_vector = drsuapi.DsReplicaCursorCtrEx()
if store_utdv.version == 1:
uptodateness_vector.cursors = store_utdv.cursors
elif store_utdv.version == 2:
cursors = []
for i in range(0, store_utdv.ctr.count):
cursor = drsuapi.DsReplicaCursor()
cursor.source_dsa_invocation_id = store_utdv.ctr.cursors[i].source_dsa_invocation_id
cursor.highest_usn = store_utdv.ctr.cursors[i].highest_usn
cursors.append(cursor)
uptodateness_vector.cursors = cursors
req8 = drsuapi.DsGetNCChangesRequest8()
req8.destination_dsa_guid = null_guid
req8.source_dsa_invocation_id = null_guid
req8.naming_context = naming_context
req8.highwatermark = highwatermark
req8.uptodateness_vector = uptodateness_vector
req8.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
drsuapi.DRSUAPI_DRS_PER_SYNC |
drsuapi.DRSUAPI_DRS_GET_ANC |
drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
drsuapi.DRSUAPI_DRS_WRIT_REP)
req8.max_object_count = 402
req8.max_ndr_size = 402116
req8.extended_op = 0
req8.fsmo_info = 0
req8.partial_attribute_set = None
req8.partial_attribute_set_ex = None
req8.mapping_ctr.num_mappings = 0
req8.mapping_ctr.mappings = None
user_session_key = drs_conn.user_session_key
print "# starting at usn[%d]" % (highwatermark.highest_usn)
while True:
(level, ctr) = drs_conn.DsGetNCChanges(drs_handle, 8, req8)
if ctr.first_object == None and ctr.object_count != 0:
raise RuntimeError("DsGetNCChanges: NULL first_object with object_count=%u" % (ctr.object_count))
obj_item = ctr.first_object
while obj_item is not None:
obj = obj_item.object
if obj.identifier is None:
obj_item = obj_item.next_object
continue
#print '%s' % obj.identifier.dn
is_deleted = False
for i in range(0, obj.attribute_ctr.num_attributes):
attr = obj.attribute_ctr.attributes[i]
if attid_equal(attr.attid, drsuapi.DRSUAPI_ATTID_isDeleted):
is_deleted = True
if is_deleted:
obj_item = obj_item.next_object
continue
spl_crypt = None
attvals = None
for i in range(0, obj.attribute_ctr.num_attributes):
attr = obj.attribute_ctr.attributes[i]
if attid_equal(attr.attid, attid):
attvals = []
for j in range(0, attr.value_ctr.num_values):
assert attr.value_ctr.values[j].blob is not None
val_raw = attr.value_ctr.values[j].blob
val = None
if attmode == "utf8":
val_unicode = unicode(val_raw, 'utf-16-le')
val = val_unicode.encode('utf-8')
elif attmode == "raw":
val = val_raw
else:
assert False, "attmode[%s]" % attmode
attvals.append(val)
if not attid_equal(attr.attid, drsuapi.DRSUAPI_ATTID_supplementalCredentials):
continue
assert attr.value_ctr.num_values <= 1
if attr.value_ctr.num_values == 0:
break
assert attr.value_ctr.values[0].blob is not None
spl_crypt = attr.value_ctr.values[0].blob
if spl_crypt is None:
obj_item = obj_item.next_object
continue
assert len(spl_crypt) >= 20
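# Layout of the encrypted value (descriptive note): a 16-byte confounder
# followed by an RC4-encrypted buffer, keyed with
# MD5(user_session_key + confounder); the decrypted buffer starts with a
# little-endian CRC32 of the attribute value, checked below.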
confounder = spl_crypt[0:16]
enc_buffer = spl_crypt[16:]
m5 = hashlib.md5()
m5.update(user_session_key)
m5.update(confounder)
enc_key = m5.digest()
rc4 = Crypto.Cipher.ARC4.new(enc_key)
plain_buffer = rc4.decrypt(enc_buffer)
(crc32_v) = struct.unpack("<L", plain_buffer[0:4])
attr_val = plain_buffer[4:]
crc32_c = binascii.crc32(attr_val) & 0xffffffff
assert int(crc32_v[0]) == int(crc32_c), "CRC32 0x%08X != 0x%08X" % (crc32_v[0], crc32_c)
spl = ndr_unpack(drsblobs.supplementalCredentialsBlob, attr_val)
#print '%s' % ndr_print(spl)
cleartext_hex = None
for i in range(0, spl.sub.num_packages):
pkg = spl.sub.packages[i]
if pkg.name != "Primary:CLEARTEXT":
continue
cleartext_hex = pkg.data
if cleartext_hex is not None:
cleartext_utf16 = binascii.a2b_hex(cleartext_hex)
if clear_utf16_name is not None:
gls.add_attr(obj.identifier.dn, clear_utf16_name, [cleartext_utf16])
try:
cleartext_unicode = unicode(cleartext_utf16, 'utf-16-le')
cleartext_utf8 = cleartext_unicode.encode('utf-8')
gls.add_attr(obj.identifier.dn, clear_utf8_name, [cleartext_utf8])
except Exception:
pass
if attvals is not None:
gls.add_attr(obj.identifier.dn, attname, attvals)
krb5_old_hex = None
for i in range(0, spl.sub.num_packages):
pkg = spl.sub.packages[i]
if pkg.name != "Primary:Kerberos":
continue
krb5_old_hex = pkg.data
if krb5_old_hex is not None:
krb5_old_raw = binascii.a2b_hex(krb5_old_hex)
krb5_old = ndr_unpack(drsblobs.package_PrimaryKerberosBlob, krb5_old_raw, allow_remaining=True)
#print '%s' % ndr_print(krb5_old)
krb5_new_hex = None
for i in range(0, spl.sub.num_packages):
pkg = spl.sub.packages[i]
if pkg.name != "Primary:Kerberos-Newer-Keys":
continue
krb5_new_hex = pkg.data
if krb5_new_hex is not None:
krb5_new_raw = binascii.a2b_hex(krb5_new_hex)
krb5_new = ndr_unpack(drsblobs.package_PrimaryKerberosBlob, krb5_new_raw, allow_remaining=True)
#print '%s' % ndr_print(krb5_new)
obj_item = obj_item.next_object
gls.print_all()
if ctr.more_data == 0:
store_hwm = ctr.new_highwatermark
store_utdv = drsblobs.replUpToDateVectorBlob()
store_utdv.version = ctr.uptodateness_vector.version
store_utdv_ctr = store_utdv.ctr
store_utdv_ctr.count = ctr.uptodateness_vector.count
store_utdv_ctr.cursors = ctr.uptodateness_vector.cursors
store_utdv.ctr = store_utdv_ctr
#print "%s" % ndr_print(store_hwm)
#print "%s" % ndr_print(store_utdv)
store_hwm_blob = ndr_pack(store_hwm)
store_utdv_blob = ndr_pack(store_utdv)
#
# uint32_t version '1'
# uint32_t dn_str_len
# uint32_t dn_str_ofs
# uint32_t hwm_blob_len
# uint32_t hwm_blob_ofs
# uint32_t utdv_blob_len
# uint32_t utdv_blob_ofs
store_hdr_len = 7 * 4
dn_ofs = store_hdr_len
hwm_ofs = dn_ofs + len(dn)
utdv_ofs = hwm_ofs + len(store_hwm_blob)
store_blob = struct.pack("<LLLLLLL", 1, \
len(dn), dn_ofs,
len(store_hwm_blob), hwm_ofs, \
len(store_utdv_blob), utdv_ofs) + \
dn + store_hwm_blob + store_utdv_blob
tmp_file = "%s.tmp" % cookie_file
f = open(tmp_file, 'wb')
f.write(store_blob)
f.close()
os.rename(tmp_file, cookie_file)
print "# up to usn[%d]" % (ctr.new_highwatermark.highest_usn)
break
print "# up to tmp_usn[%d]" % (ctr.new_highwatermark.highest_usn)
req8.highwatermark = ctr.new_highwatermark
| gpl-3.0 | 1,433,489,055,485,473,300 | 39.00489 | 140 | 0.598704 | false |
163gal/Time-Line | libs/wx/py/PySlices.py | 10 | 3159 | #!/usr/bin/env python
"""PySlices is a python block code editor / shell and namespace browser application."""
# The next two lines, and the other code below that makes use of
# ``__main__`` and ``original``, serve the purpose of cleaning up the
# main namespace to look as much as possible like the regular Python
# shell environment.
import __main__
original = __main__.__dict__.keys()
__author__ = "Patrick K. O'Brien <[email protected]> / "
__author__ += "David N. Mashburn <[email protected]>"
__cvsid__ = "$Id: PySlices.py 36607 2005-12-30 23:02:03Z RD $" # Hmmm...
__revision__ = "$Revision: 36607 $"[11:-2] #Hmmm...
import wx
import os
class App(wx.App):
"""PySlices standalone application."""
def __init__(self, filename=None):
self.filename = filename
import wx
wx.App.__init__(self, redirect=False)
def OnInit(self):
import os
import wx
from wx import py
self.SetAppName("pyslices")
confDir = wx.StandardPaths.Get().GetUserDataDir()
if not os.path.exists(confDir):
os.mkdir(confDir)
fileName = os.path.join(confDir, 'config')
self.config = wx.FileConfig(localFilename=fileName)
self.config.SetRecordDefaults(True)
self.frame = py.crustslices.CrustSlicesFrame(config=self.config, dataDir=confDir,
filename=self.filename)
## self.frame.startupFileName = os.path.join(confDir,'pycrust_startup')
## self.frame.historyFileName = os.path.join(confDir,'pycrust_history')
self.frame.Show()
self.SetTopWindow(self.frame)
return True
'''
The main() function needs to handle being imported, such as with the
pycrust script that wxPython installs:
#!/usr/bin/env python
from wx.py.PySlices import main
main()
'''
def main(filename=None):
"""The main function for the PySlices program."""
# Cleanup the main namespace, leaving the App class.
import sys
if not filename and len(sys.argv) > 1:
filename = sys.argv[1]
if filename:
filename = os.path.realpath(filename)
import __main__
md = __main__.__dict__
keepers = original
keepers.append('App')
keepers.append('filename')
for key in md.keys():
if key not in keepers:
del md[key]
# Create an application instance.
app = App(filename=filename)
# Mimic the contents of the standard Python shell's sys.path.
import sys
if sys.path[0]:
sys.path[0] = ''
# Add the application object to the sys module's namespace.
# This allows a shell user to do:
# >>> import sys
# >>> sys.app.whatever
sys.app = app
del sys
# Cleanup the main namespace some more.
if md.has_key('App') and md['App'] is App:
del md['App']
if md.has_key('filename') and md['filename'] is filename:
del md['filename']
if md.has_key('__main__') and md['__main__'] is __main__:
del md['__main__']
# Start the wxPython event loop.
app.MainLoop()
if __name__ == '__main__':
main()
| gpl-3.0 | -3,069,522,285,386,507,000 | 30.59 | 89 | 0.609687 | false |
rlutz/xorn | tests/cpython/storage/get_obdata.py | 1 | 1953 | # Copyright (C) 2013-2019 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import xorn.storage, Setup
def assert_cannot_get(rev, ob):
try:
rev.get_object_data(ob)
except KeyError:
pass
else:
raise AssertionError
rev0, rev1, rev2, rev3, ob0, ob1a, ob1b = Setup.setup()
assert_cannot_get(rev0, ob0)
assert_cannot_get(rev0, ob1a)
assert_cannot_get(rev0, ob1b)
data = rev1.get_object_data(ob0)
assert data is not None
assert data != Setup.line_data
assert type(data) == type(Setup.line_data)
assert_cannot_get(rev1, ob1a)
assert_cannot_get(rev1, ob1b)
data = rev2.get_object_data(ob0)
assert data is not None
assert data != Setup.line_data
assert type(data) == type(Setup.line_data)
data = rev2.get_object_data(ob1a)
assert data is not None
assert data != Setup.box_data
assert type(data) == type(Setup.box_data)
data = rev2.get_object_data(ob1b)
assert data is not None
assert data != Setup.circle_data
assert type(data) == type(Setup.circle_data)
data = rev3.get_object_data(ob0)
assert data is not None
assert data != Setup.net_data
assert type(data) == type(Setup.net_data)
assert_cannot_get(rev3, ob1a)
data = rev3.get_object_data(ob1b)
assert data is not None
assert data != Setup.circle_data
assert type(data) == type(Setup.circle_data)
| gpl-2.0 | 3,698,071,841,098,026,000 | 31.016393 | 73 | 0.737839 | false |
qguv/config | weechat/community/wee-slack/wee_slack.py | 1 | 197049 | # Copyright (c) 2014-2016 Ryan Huber <[email protected]>
# Copyright (c) 2015-2018 Tollef Fog Heen <[email protected]>
# Copyright (c) 2015-2020 Trygve Aaberge <[email protected]>
# Released under the MIT license.
from __future__ import print_function, unicode_literals
from collections import OrderedDict
from datetime import date, datetime, timedelta
from functools import partial, wraps
from io import StringIO
from itertools import chain, count, islice
import errno
import textwrap
import time
import json
import hashlib
import os
import re
import sys
import traceback
import collections
import ssl
import random
import socket
import string
# Prevent websocket from using numpy (it's an optional dependency). We do this
# because numpy causes python (and thus weechat) to crash when it's reloaded.
# See https://github.com/numpy/numpy/issues/11925
sys.modules["numpy"] = None
from websocket import ABNF, create_connection, WebSocketConnectionClosedException
try:
basestring # Python 2
unicode
str = unicode
except NameError: # Python 3
basestring = unicode = str
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from json import JSONDecodeError
except:
JSONDecodeError = ValueError
# hack to make tests possible.. better way?
try:
import weechat
except ImportError:
pass
SCRIPT_NAME = "slack"
SCRIPT_AUTHOR = "Ryan Huber <[email protected]>"
SCRIPT_VERSION = "2.4.0"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"
REPO_URL = "https://github.com/wee-slack/wee-slack"
BACKLOG_SIZE = 200
SCROLLBACK_SIZE = 500
RECORD_DIR = "/tmp/weeslack-debug"
SLACK_API_TRANSLATOR = {
"channel": {
"history": "channels.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "channels.info",
},
"im": {
"history": "im.history",
"join": "conversations.open",
"leave": "conversations.close",
"mark": "im.mark",
},
"mpim": {
"history": "mpim.history",
"join": "mpim.open", # conversations.open lacks unread_count_display
"leave": "conversations.close",
"mark": "mpim.mark",
"info": "groups.info",
},
"group": {
"history": "groups.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "groups.mark",
"info": "groups.info"
},
"private": {
"history": "conversations.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "conversations.mark",
"info": "conversations.info",
},
"shared": {
"history": "conversations.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "conversations.info",
},
"thread": {
"history": None,
"join": None,
"leave": None,
"mark": None,
}
}
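# Example lookup (illustrative): SLACK_API_TRANSLATOR["im"]["mark"] yields
# "im.mark", the API method used to move the read cursor in a direct message.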
###### Decorators have to be up here
def slack_buffer_or_ignore(f):
"""
Only run this function if we're in a slack buffer, else ignore
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_OK
return f(data, current_buffer, *args, **kwargs)
return wrapper
def slack_buffer_required(f):
"""
Only run this function if we're in a slack buffer, else print error
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
command_name = f.__name__.replace('command_', '', 1)
w.prnt('', 'slack: command "{}" must be executed on slack buffer'.format(command_name))
return w.WEECHAT_RC_ERROR
return f(data, current_buffer, *args, **kwargs)
return wrapper
def utf8_decode(f):
"""
Decode all arguments from byte strings to unicode strings. Use this for
functions called from outside of this script, e.g. callbacks from weechat.
"""
@wraps(f)
def wrapper(*args, **kwargs):
return f(*decode_from_utf8(args), **decode_from_utf8(kwargs))
return wrapper
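# Illustrative use of the decorator (hypothetical callback, not part of the
# original script); weechat hands callbacks byte strings on Python 2, which
# utf8_decode converts to unicode before the body runs:
#
#   @utf8_decode
#   def my_signal_cb(data, signal, signal_data):
#       return w.WEECHAT_RC_OK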
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"
NICK_GROUP_EXTERNAL = "2|External"
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
ssl_defaults = ssl.get_default_verify_paths()
if ssl_defaults.cafile is not None:
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
EMOJI = {}
EMOJI_WITH_SKIN_TONES_REVERSE = {}
###### Unicode handling
def encode_to_utf8(data):
if sys.version_info.major > 2:
return data
elif isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, bytes):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(encode_to_utf8, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(encode_to_utf8, data))
else:
return data
def decode_from_utf8(data):
if sys.version_info.major > 2:
return data
elif isinstance(data, bytes):
return data.decode('utf-8')
if isinstance(data, unicode):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(decode_from_utf8, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(decode_from_utf8, data))
else:
return data
class WeechatWrapper(object):
def __init__(self, wrapped_class):
self.wrapped_class = wrapped_class
# Helper method used to encode/decode method calls.
def wrap_for_utf8(self, method):
def hooked(*args, **kwargs):
result = method(*encode_to_utf8(args), **encode_to_utf8(kwargs))
# Prevent wrapped_class from becoming unwrapped
if result == self.wrapped_class:
return self
return decode_from_utf8(result)
return hooked
# Encode and decode everything sent to/received from weechat. We use the
# unicode type internally in wee-slack, but have to send utf8 to weechat.
def __getattr__(self, attr):
orig_attr = self.wrapped_class.__getattribute__(attr)
if callable(orig_attr):
return self.wrap_for_utf8(orig_attr)
else:
return decode_from_utf8(orig_attr)
# Ensure all lines sent to weechat specify a prefix. For lines after the
# first, we want to disable the prefix, which is done by specifying a space.
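# e.g. "one\ntwo" is passed on as "one\n \ttwo", so weechat shows the second
# line with a blank prefix instead of repeating the first line's prefix.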
def prnt_date_tags(self, buffer, date, tags, message):
message = message.replace("\n", "\n \t")
return self.wrap_for_utf8(self.wrapped_class.prnt_date_tags)(buffer, date, tags, message)
class ProxyWrapper(object):
def __init__(self):
self.proxy_name = w.config_string(w.config_get('weechat.network.proxy_curl'))
self.proxy_string = ""
self.proxy_type = ""
self.proxy_address = ""
self.proxy_port = ""
self.proxy_user = ""
self.proxy_password = ""
self.has_proxy = False
if self.proxy_name:
self.proxy_string = "weechat.proxy.{}".format(self.proxy_name)
self.proxy_type = w.config_string(w.config_get("{}.type".format(self.proxy_string)))
if self.proxy_type == "http":
self.proxy_address = w.config_string(w.config_get("{}.address".format(self.proxy_string)))
self.proxy_port = w.config_integer(w.config_get("{}.port".format(self.proxy_string)))
self.proxy_user = w.config_string(w.config_get("{}.username".format(self.proxy_string)))
self.proxy_password = w.config_string(w.config_get("{}.password".format(self.proxy_string)))
self.has_proxy = True
else:
w.prnt("", "\nWarning: weechat.network.proxy_curl is set to {} type (name : {}, conf string : {}). Only HTTP proxy is supported.\n\n".format(self.proxy_type, self.proxy_name, self.proxy_string))
def curl(self):
if not self.has_proxy:
return ""
if self.proxy_user and self.proxy_password:
user = "{}:{}@".format(self.proxy_user, self.proxy_password)
else:
user = ""
if self.proxy_port:
port = ":{}".format(self.proxy_port)
else:
port = ""
return "-x{}{}{}".format(user, self.proxy_address, port)
##### Helpers
def colorize_string(color, string, reset_color='reset'):
if color:
return w.color(color) + string + w.color(reset_color)
else:
return string
def print_error(message, buffer='', warning=False):
prefix = 'Warning' if warning else 'Error'
w.prnt(buffer, '{}{}: {}'.format(w.prefix('error'), prefix, message))
def token_for_print(token):
return '{}...{}'.format(token[:15], token[-10:])
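# e.g. a hypothetical token "xoxc-1234567890abcdefghijklmnopqrstuvwxyz"
# prints as "xoxc-1234567890...qrstuvwxyz".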
def format_exc_tb():
return decode_from_utf8(traceback.format_exc())
def format_exc_only():
etype, value, _ = sys.exc_info()
return ''.join(decode_from_utf8(traceback.format_exception_only(etype, value)))
def get_nick_color(nick):
info_name_prefix = "irc_" if int(weechat_version) < 0x1050000 else ""
return w.info_get(info_name_prefix + "nick_color_name", nick)
def get_thread_color(thread_id):
if config.color_thread_suffix == 'multiple':
return get_nick_color(thread_id)
else:
return config.color_thread_suffix
def sha1_hex(s):
return hashlib.sha1(s.encode('utf-8')).hexdigest()
def get_functions_with_prefix(prefix):
return {name[len(prefix):]: ref for name, ref in globals().items()
if name.startswith(prefix)}
def handle_socket_error(exception, team, caller_name):
if not (isinstance(exception, WebSocketConnectionClosedException) or
exception.errno in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT)):
raise
w.prnt(team.channel_buffer,
'Lost connection to slack team {} (on {}), reconnecting.'.format(
team.domain, caller_name))
dbg('Socket failed on {} with exception:\n{}'.format(
caller_name, format_exc_tb()), level=5)
team.set_disconnected()
EMOJI_NAME_REGEX = re.compile(':([^: ]+):')
EMOJI_REGEX_STRING = '[\U00000080-\U0010ffff]+'
def regex_match_to_emoji(match, include_name=False):
emoji = match.group(1)
full_match = match.group()
char = EMOJI.get(emoji, full_match)
if include_name and char != full_match:
return '{} ({})'.format(char, full_match)
return char
def replace_string_with_emoji(text):
if config.render_emoji_as_string == 'both':
return EMOJI_NAME_REGEX.sub(
partial(regex_match_to_emoji, include_name=True),
text,
)
elif config.render_emoji_as_string:
return text
return EMOJI_NAME_REGEX.sub(regex_match_to_emoji, text)
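# e.g. with render_emoji_as_string off, "hi :smile:" becomes "hi 😄"; with
# the "both" setting it becomes "hi 😄 (:smile:)" (illustrative; the actual
# mapping comes from the EMOJI table loaded at runtime).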
def replace_emoji_with_string(text):
return EMOJI_WITH_SKIN_TONES_REVERSE.get(text, text)
###### New central Event router
class EventRouter(object):
def __init__(self):
"""
complete
Eventrouter is the central hub we use to route:
1) incoming websocket data
2) outgoing http requests and incoming replies
3) local requests
It has a recorder that, when enabled, logs most events
to the location specified in RECORD_DIR.
"""
self.queue = []
self.slow_queue = []
self.slow_queue_timer = 0
self.teams = {}
self.subteams = {}
self.context = {}
self.weechat_controller = WeechatController(self)
self.previous_buffer = ""
self.reply_buffer = {}
self.cmds = get_functions_with_prefix("command_")
self.proc = get_functions_with_prefix("process_")
self.handlers = get_functions_with_prefix("handle_")
self.local_proc = get_functions_with_prefix("local_process_")
self.shutting_down = False
self.recording = False
self.recording_path = "/tmp"
self.handle_next_hook = None
self.handle_next_hook_interval = -1
def record(self):
"""
complete
Toggles the event recorder and creates a directory for data if enabled.
"""
self.recording = not self.recording
if self.recording:
if not os.path.exists(RECORD_DIR):
os.makedirs(RECORD_DIR)
def record_event(self, message_json, file_name_field, subdir=None):
"""
complete
Called each time you want to record an event.
message_json is a json in dict form
file_name_field is the json key whose value you want to be part of the file name
"""
now = time.time()
if subdir:
directory = "{}/{}".format(RECORD_DIR, subdir)
else:
directory = RECORD_DIR
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get(file_name_field, 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write("{}".format(json.dumps(message_json)))
f.close()
def store_context(self, data):
"""
A place to store data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
identifier = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(40))
self.context[identifier] = data
dbg("stored context {} {} ".format(identifier, data.url))
return identifier
def retrieve_context(self, identifier):
"""
A place to retrieve data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
return self.context.get(identifier)
def delete_context(self, identifier):
"""
A single logical request can span multiple HTTP requests, so we may need to delete this as a last step
"""
if identifier in self.context:
del self.context[identifier]
def shutdown(self):
"""
complete
This toggles shutdown mode. Shutdown mode tells us not to
talk to Slack anymore. Without this, typing /quit will trigger
a race with the buffer close callback and may result in you
leaving every slack channel.
"""
self.shutting_down = not self.shutting_down
def register_team(self, team):
"""
complete
Adds a team to the list of known teams for this EventRouter.
"""
if isinstance(team, SlackTeam):
self.teams[team.get_team_hash()] = team
else:
raise InvalidType(type(team))
def reconnect_if_disconnected(self):
for team in self.teams.values():
time_since_last_ping = time.time() - team.last_ping_time
time_since_last_pong = time.time() - team.last_pong_time
if team.connected and time_since_last_ping < 5 and time_since_last_pong > 30:
w.prnt(team.channel_buffer,
'Lost connection to slack team {} (no pong), reconnecting.'.format(
team.domain))
team.set_disconnected()
if not team.connected:
team.connect()
dbg("reconnecting {}".format(team))
@utf8_decode
def receive_ws_callback(self, team_hash, fd):
"""
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
team = self.teams[team_hash]
while True:
try:
# Read the data from the websocket associated with this team.
opcode, data = team.ws.recv_data(control_frame=True)
except ssl.SSLWantReadError:
# No more data to read at this time.
return w.WEECHAT_RC_OK
except (WebSocketConnectionClosedException, socket.error) as e:
handle_socket_error(e, team, 'receive')
return w.WEECHAT_RC_OK
if opcode == ABNF.OPCODE_PONG:
team.last_pong_time = time.time()
return w.WEECHAT_RC_OK
elif opcode != ABNF.OPCODE_TEXT:
return w.WEECHAT_RC_OK
message_json = json.loads(data.decode('utf-8'))
message_json["wee_slack_metadata_team"] = team
if self.recording:
self.record_event(message_json, 'type', 'websocket')
self.receive(message_json)
return w.WEECHAT_RC_OK
@utf8_decode
def receive_httprequest_callback(self, data, command, return_code, out, err):
"""
complete
Receives the result of an http request we previously handed
off to weechat (weechat bundles libcurl). Weechat can fragment
replies, so we buffer them until the reply is complete.
It is then populated with metadata here so we can identify
where the request originated and route properly.
"""
request_metadata = self.retrieve_context(data)
dbg("RECEIVED CALLBACK with request of {} id of {} and code {} of length {}".format(request_metadata.request, request_metadata.response_id, return_code, len(out)))
if return_code == 0:
if len(out) > 0:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
try:
j = json.loads(self.reply_buffer[request_metadata.response_id].getvalue())
except:
pass
# dbg("Incomplete json, awaiting more", True)
try:
j["wee_slack_process_method"] = request_metadata.request_normalized
if self.recording:
self.record_event(j, 'wee_slack_process_method', 'http')
j["wee_slack_request_metadata"] = request_metadata
self.reply_buffer.pop(request_metadata.response_id)
self.receive(j)
self.delete_context(data)
except:
dbg("HTTP REQUEST CALLBACK FAILED", True)
pass
# We got an empty reply and this is weird so just ditch it and retry
else:
dbg("length was zero, probably a bug..")
self.delete_context(data)
self.receive(request_metadata)
elif return_code == -1:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
else:
self.reply_buffer.pop(request_metadata.response_id, None)
self.delete_context(data)
if request_metadata.request.startswith('rtm.'):
retry_text = ('retrying' if request_metadata.should_try() else
'will not retry after too many failed attempts')
w.prnt('', ('Failed connecting to slack team with token {}, {}. ' +
'If this persists, try increasing slack_timeout. Error: {}')
.format(token_for_print(request_metadata.token), retry_text, err))
dbg('rtm.start failed with return_code {}. stack:\n{}'
.format(return_code, ''.join(traceback.format_stack())), level=5)
self.receive(request_metadata)
return w.WEECHAT_RC_OK
def receive(self, dataobj):
"""
complete
Receives a raw object and places it on the queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.queue.append(dataobj)
def receive_slow(self, dataobj):
"""
complete
Receives a raw object and places it on the slow queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.slow_queue.append(dataobj)
def handle_next(self):
"""
complete
Main handler of the EventRouter. This is called repeatedly
via callback to drain events from the queue. It also attaches
useful metadata and context to events as they are processed.
"""
wanted_interval = 100
if len(self.slow_queue) > 0 or len(self.queue) > 0:
wanted_interval = 10
if self.handle_next_hook is None or wanted_interval != self.handle_next_hook_interval:
if self.handle_next_hook:
w.unhook(self.handle_next_hook)
self.handle_next_hook = w.hook_timer(wanted_interval, 0, 0, "handle_next", "")
self.handle_next_hook_interval = wanted_interval
if len(self.slow_queue) > 0 and ((self.slow_queue_timer + 1) < time.time()):
dbg("from slow queue", 0)
self.queue.append(self.slow_queue.pop())
self.slow_queue_timer = time.time()
if len(self.queue) > 0:
j = self.queue.pop(0)
# Reply is a special case of a json reply from websocket.
kwargs = {}
if isinstance(j, SlackRequest):
if j.should_try():
if j.retry_ready():
local_process_async_slack_api_request(j, self)
else:
self.slow_queue.append(j)
else:
dbg("Max retries for Slackrequest")
else:
if "reply_to" in j:
dbg("SET FROM REPLY")
function_name = "reply"
elif "type" in j:
dbg("SET FROM type")
function_name = j["type"]
elif "wee_slack_process_method" in j:
dbg("SET FROM META")
function_name = j["wee_slack_process_method"]
else:
dbg("SET FROM NADA")
function_name = "unknown"
request = j.get("wee_slack_request_metadata")
if request:
team = request.team
channel = request.channel
metadata = request.metadata
else:
team = j.get("wee_slack_metadata_team")
channel = None
metadata = {}
if team:
if "channel" in j:
channel_id = j["channel"]["id"] if type(j["channel"]) == dict else j["channel"]
channel = team.channels.get(channel_id, channel)
if "user" in j:
user_id = j["user"]["id"] if type(j["user"]) == dict else j["user"]
metadata['user'] = team.users.get(user_id)
dbg("running {}".format(function_name))
if function_name.startswith("local_") and function_name in self.local_proc:
self.local_proc[function_name](j, self, team, channel, metadata)
elif function_name in self.proc:
self.proc[function_name](j, self, team, channel, metadata)
elif function_name in self.handlers:
self.handlers[function_name](j, self, team, channel, metadata)
else:
dbg("Callback not implemented for event: {}".format(function_name))
def handle_next(data, remaining_calls):
try:
EVENTROUTER.handle_next()
except:
if config.debug_mode:
traceback.print_exc()
else:
pass
return w.WEECHAT_RC_OK
class WeechatController(object):
"""
Encapsulates our interaction with weechat
"""
def __init__(self, eventrouter):
self.eventrouter = eventrouter
self.buffers = {}
self.previous_buffer = None
self.buffer_list_stale = False
def iter_buffers(self):
for b in self.buffers:
yield (b, self.buffers[b])
def register_buffer(self, buffer_ptr, channel):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
self.buffers[buffer_ptr] = channel
else:
raise InvalidType(type(buffer_ptr))
def unregister_buffer(self, buffer_ptr, update_remote=False, close_buffer=False):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
channel = self.buffers.get(buffer_ptr)
if channel:
channel.destroy_buffer(update_remote)
del self.buffers[buffer_ptr]
if close_buffer:
w.buffer_close(buffer_ptr)
def get_channel_from_buffer_ptr(self, buffer_ptr):
return self.buffers.get(buffer_ptr)
def get_all(self, buffer_ptr):
return self.buffers
def get_previous_buffer_ptr(self):
return self.previous_buffer
def set_previous_buffer(self, data):
self.previous_buffer = data
def check_refresh_buffer_list(self):
return self.buffer_list_stale and self.last_buffer_list_update + 1 < time.time()
def set_refresh_buffer_list(self, setting):
self.buffer_list_stale = setting
###### New Local Processors
def local_process_async_slack_api_request(request, event_router):
"""
complete
Sends an API request to Slack. You'll need to give this a well formed SlackRequest object.
DEBUGGING!!! The context here cannot be very large. Weechat will crash.
"""
if not event_router.shutting_down:
weechat_request = 'url:{}'.format(request.request_string())
weechat_request += '&nonce={}'.format(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
request.tried()
context = event_router.store_context(request)
# TODO: let flashcode know about this bug - i have to 'clear' the hashtable or retry requests fail
w.hook_process_hashtable('url:', params, config.slack_timeout, "", context)
w.hook_process_hashtable(weechat_request, params, config.slack_timeout, "receive_httprequest_callback", context)
###### New Callbacks
@utf8_decode
def ws_ping_cb(data, remaining_calls):
for team in EVENTROUTER.teams.values():
if team.ws and team.connected:
try:
team.ws.ping()
team.last_ping_time = time.time()
except (WebSocketConnectionClosedException, socket.error) as e:
handle_socket_error(e, team, 'ping')
return w.WEECHAT_RC_OK
@utf8_decode
def reconnect_callback(*args):
EVENTROUTER.reconnect_if_disconnected()
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_closing_callback(signal, sig_type, data):
"""
Receives a callback from weechat when a buffer is being closed.
"""
EVENTROUTER.weechat_controller.unregister_buffer(data, True, False)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_input_callback(signal, buffer_ptr, data):
"""
incomplete
Handles everything a user types in the input bar. In our case
this includes add/remove reactions, modifying messages, and
sending messages.
"""
eventrouter = eval(signal)
channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(buffer_ptr)
if not channel:
return w.WEECHAT_RC_ERROR
def get_id(message_id):
if not message_id:
return 1
elif message_id[0] == "$":
return message_id[1:]
else:
return int(message_id)
message_id_regex = r"(\d*|\$[0-9a-fA-F]{3,})"
reaction = re.match(r"^{}(\+|-)(:(.+):|{})\s*$".format(message_id_regex, EMOJI_REGEX_STRING), data)
substitute = re.match("^{}s/".format(message_id_regex), data)
if reaction:
emoji_match = reaction.group(4) or reaction.group(3)
emoji = replace_emoji_with_string(emoji_match)
if reaction.group(2) == "+":
channel.send_add_reaction(get_id(reaction.group(1)), emoji)
elif reaction.group(2) == "-":
channel.send_remove_reaction(get_id(reaction.group(1)), emoji)
elif substitute:
msg_id = get_id(substitute.group(1))
try:
old, new, flags = re.split(r'(?<!\\)/', data)[1:]
except ValueError:
pass
else:
# Replacement string in re.sub() is a string, not a regex, so get
# rid of escapes.
new = new.replace(r'\/', '/')
old = old.replace(r'\/', '/')
channel.edit_nth_previous_message(msg_id, old, new, flags)
else:
if data.startswith(('//', ' ')):
data = data[1:]
channel.send_message(data)
# this is probably wrong channel.mark_read(update_remote=True, force=True)
return w.WEECHAT_RC_OK
# Workaround for supporting multiline messages. It intercepts the message
# before the input callback is called, since this modifier receives the whole
# message while it would normally be split on newlines before being sent to
# buffer_input_callback.
def input_text_for_buffer_cb(data, modifier, current_buffer, string):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return string
message = decode_from_utf8(string)
if not message.startswith("/") and "\n" in message:
buffer_input_callback("EVENTROUTER", current_buffer, message)
return ""
return string
@utf8_decode
def buffer_switch_callback(signal, sig_type, data):
"""
Every time we change channels in weechat, we call this to:
1) set read marker 2) determine if we have already populated
channel history data 3) set presence to active
"""
eventrouter = eval(signal)
prev_buffer_ptr = eventrouter.weechat_controller.get_previous_buffer_ptr()
# this is to see if we need to gray out things in the buffer list
prev = eventrouter.weechat_controller.get_channel_from_buffer_ptr(prev_buffer_ptr)
if prev:
prev.mark_read()
new_channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(data)
if new_channel:
if not new_channel.got_history:
new_channel.get_history()
set_own_presence_active(new_channel.team)
eventrouter.weechat_controller.set_previous_buffer(data)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_list_update_callback(data, somecount):
"""
incomplete
A simple timer-based callback that will update the buffer list
if needed. We only do this max 1x per second, as otherwise it
uses a lot of cpu for minimal changes. We use buffer short names
to indicate typing via "#channel" <-> ">channel" and
user presence via " name" <-> "+name".
"""
eventrouter = eval(data)
for b in eventrouter.weechat_controller.iter_buffers():
b[1].refresh()
# buffer_list_update = True
# if eventrouter.weechat_controller.check_refresh_buffer_list():
# # gray_check = False
# # if len(servers) > 1:
# # gray_check = True
# eventrouter.weechat_controller.set_refresh_buffer_list(False)
return w.WEECHAT_RC_OK
def quit_notification_callback(signal, sig_type, data):
stop_talking_to_slack()
return w.WEECHAT_RC_OK
@utf8_decode
def typing_notification_cb(data, signal, current_buffer):
msg = w.buffer_get_string(current_buffer, "input")
if len(msg) > 8 and msg[0] != "/":
global typing_timer
now = time.time()
if typing_timer + 4 < now:
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if channel and channel.type != "thread":
identifier = channel.identifier
request = {"type": "typing", "channel": identifier}
channel.team.send_to_websocket(request, expect_reply=False)
typing_timer = now
return w.WEECHAT_RC_OK
@utf8_decode
def typing_update_cb(data, remaining_calls):
w.bar_item_update("slack_typing_notice")
return w.WEECHAT_RC_OK
@utf8_decode
def slack_never_away_cb(data, remaining_calls):
if config.never_away:
for team in EVENTROUTER.teams.values():
set_own_presence_active(team)
return w.WEECHAT_RC_OK
@utf8_decode
def typing_bar_item_cb(data, item, current_window, current_buffer, extra_info):
"""
Privides a bar item indicating who is typing in the current channel AND
why is typing a DM to you globally.
"""
typers = []
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
# first look for people typing in this channel
if current_channel:
        # this try is mostly because server buffers don't implement is_someone_typing
try:
if current_channel.type != 'im' and current_channel.is_someone_typing():
typers += current_channel.get_typing_list()
except:
pass
# here is where we notify you that someone is typing in DM
# regardless of which buffer you are in currently
for team in EVENTROUTER.teams.values():
for channel in team.channels.values():
if channel.type == "im":
                if channel.is_someone_typing():
                    typers.append("D/" + channel.slack_name)
typing = ", ".join(typers)
if typing != "":
typing = colorize_string(config.color_typing_notice, "typing: " + typing)
return typing
@utf8_decode
def away_bar_item_cb(data, item, current_window, current_buffer, extra_info):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if not channel:
return ''
if channel.team.is_user_present(channel.team.myidentifier):
return ''
else:
away_color = w.config_string(w.config_get('weechat.color.item_away'))
if channel.team.my_manual_presence == 'away':
return colorize_string(away_color, 'manual away')
else:
return colorize_string(away_color, 'auto away')
@utf8_decode
def channel_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all channels on all teams to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
should_include_channel = lambda channel: channel.active and channel.type in ['channel', 'group', 'private', 'shared']
other_teams = [team for team in EVENTROUTER.teams.values() if not current_channel or team != current_channel.team]
for team in other_teams:
for channel in team.channels.values():
if should_include_channel(channel):
w.hook_completion_list_add(completion, channel.name, 0, w.WEECHAT_LIST_POS_SORT)
if current_channel:
for channel in sorted(current_channel.team.channels.values(), key=lambda channel: channel.name, reverse=True):
if should_include_channel(channel):
w.hook_completion_list_add(completion, channel.name, 0, w.WEECHAT_LIST_POS_BEGINNING)
if should_include_channel(current_channel):
w.hook_completion_list_add(completion, current_channel.name, 0, w.WEECHAT_LIST_POS_BEGINNING)
return w.WEECHAT_RC_OK
@utf8_decode
def dm_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all dms/mpdms on all teams to completion list
"""
for team in EVENTROUTER.teams.values():
for channel in team.channels.values():
if channel.active and channel.type in ['im', 'mpim']:
w.hook_completion_list_add(completion, channel.name, 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def nick_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed nicks to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
base_command = w.hook_completion_get_string(completion, "base_command")
if base_command in ['invite', 'msg', 'query', 'whois']:
members = current_channel.team.members
else:
members = current_channel.members
for member in members:
user = current_channel.team.users.get(member)
if user and not user.deleted:
w.hook_completion_list_add(completion, user.name, 1, w.WEECHAT_LIST_POS_SORT)
w.hook_completion_list_add(completion, "@" + user.name, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def emoji_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all :-prefixed emoji to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None:
return w.WEECHAT_RC_OK
base_word = w.hook_completion_get_string(completion, "base_word")
if ":" not in base_word:
return w.WEECHAT_RC_OK
prefix = base_word.split(":")[0] + ":"
for emoji in current_channel.team.emoji_completions:
w.hook_completion_list_add(completion, prefix + emoji + ":", 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def thread_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all $-prefixed thread ids to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None or not hasattr(current_channel, 'hashed_messages'):
return w.WEECHAT_RC_OK
threads = current_channel.hashed_messages.items()
for thread_id, message_ts in sorted(threads, key=lambda item: item[1]):
message = current_channel.messages.get(message_ts)
if message and message.number_of_replies():
w.hook_completion_list_add(completion, "$" + thread_id, 0, w.WEECHAT_LIST_POS_BEGINNING)
return w.WEECHAT_RC_OK
@utf8_decode
def topic_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds topic for current channel to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None:
return w.WEECHAT_RC_OK
topic = current_channel.render_topic()
channel_names = [channel.name for channel in current_channel.team.channels.values()]
if topic.split(' ', 1)[0] in channel_names:
topic = '{} {}'.format(current_channel.name, topic)
w.hook_completion_list_add(completion, topic, 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def usergroups_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed usergroups to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if current_channel is None:
return w.WEECHAT_RC_OK
subteam_handles = [subteam.handle for subteam in current_channel.team.subteams.values()]
for group in subteam_handles + ["@channel", "@everyone", "@here"]:
w.hook_completion_list_add(completion, group, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def complete_next_cb(data, current_buffer, command):
"""Extract current word, if it is equal to a nick, prefix it with @ and
rely on nick_completion_cb adding the @-prefixed versions to the
completion lists, then let Weechat's internal completion do its
thing
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
    if current_channel is None or not hasattr(current_channel, 'members') or current_channel.members is None:
return w.WEECHAT_RC_OK
line_input = w.buffer_get_string(current_buffer, "input")
current_pos = w.buffer_get_integer(current_buffer, "input_pos") - 1
input_length = w.buffer_get_integer(current_buffer, "input_length")
word_start = 0
word_end = input_length
# If we're on a non-word, look left for something to complete
while current_pos >= 0 and line_input[current_pos] != '@' and not line_input[current_pos].isalnum():
current_pos = current_pos - 1
if current_pos < 0:
current_pos = 0
for l in range(current_pos, 0, -1):
if line_input[l] != '@' and not line_input[l].isalnum():
word_start = l + 1
break
for l in range(current_pos, input_length):
if not line_input[l].isalnum():
word_end = l
break
word = line_input[word_start:word_end]
for member in current_channel.members:
user = current_channel.team.users.get(member)
if user and user.name == word:
            # Here, we cheat. Insert a @ in front and rely on the @-prefixed
            # nicks being in the completion list
w.buffer_set(current_buffer, "input", line_input[:word_start] + "@" + line_input[word_start:])
w.buffer_set(current_buffer, "input_pos", str(w.buffer_get_integer(current_buffer, "input_pos") + 1))
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK
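# Illustrative sketch (not executed): given the input "hello alice" with the
# cursor on the word "alice", the scans above extract that word; if it exactly
# matches a member nick, an "@" is inserted ("hello @alice") and weechat's own
# completion then matches the @-prefixed entries added by nick_completion_cb.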
def script_unloaded():
stop_talking_to_slack()
return w.WEECHAT_RC_OK
def stop_talking_to_slack():
"""
complete
Prevents a race condition where quitting closes buffers
which triggers leaving the channel because of how close
buffer is handled
"""
EVENTROUTER.shutdown()
for team in EVENTROUTER.teams.values():
team.ws.shutdown()
return w.WEECHAT_RC_OK
###### New Classes
class SlackRequest(object):
"""
Encapsulates a Slack api request. Valuable as an object that we can add to the queue and/or retry.
makes a SHA of the requst url and current time so we can re-tag this on the way back through.
"""
def __init__(self, team, request, post_data=None, channel=None, metadata=None, retries=3, token=None):
if team is None and token is None:
raise ValueError("Both team and token can't be None")
self.team = team
self.request = request
self.post_data = post_data if post_data else {}
self.channel = channel
self.metadata = metadata if metadata else {}
self.retries = retries
self.token = token if token else team.token
self.tries = 0
self.start_time = time.time()
self.request_normalized = re.sub(r'\W+', '', request)
self.domain = 'api.slack.com'
self.post_data['token'] = self.token
self.url = 'https://{}/api/{}?{}'.format(self.domain, self.request, urlencode(encode_to_utf8(self.post_data)))
self.params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
self.response_id = sha1_hex('{}{}'.format(self.url, self.start_time))
def __repr__(self):
return ("SlackRequest(team={}, request='{}', post_data={}, retries={}, token='{}', "
"tries={}, start_time={})").format(self.team, self.request, self.post_data,
self.retries, token_for_print(self.token), self.tries, self.start_time)
def request_string(self):
return "{}".format(self.url)
def tried(self):
self.tries += 1
self.response_id = sha1_hex("{}{}".format(self.url, time.time()))
def should_try(self):
return self.tries < self.retries
def retry_ready(self):
return (self.start_time + (self.tries**2)) < time.time()
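# Illustrative sketch (not executed): together, should_try and retry_ready
# give a quadratic backoff measured from the request's creation time. With
# the default retries=3 a failing request is attempted roughly like this:
#
#   attempt 1: immediately                (tries == 0)
#   attempt 2: start_time + 1**2 seconds  (after one failure)
#   attempt 3: start_time + 2**2 seconds  (after two failures)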
class SlackSubteam(object):
"""
Represents a slack group or subteam
"""
def __init__(self, originating_team_id, is_member, **kwargs):
self.handle = '@{}'.format(kwargs['handle'])
self.identifier = kwargs['id']
self.name = kwargs['name']
self.description = kwargs.get('description')
self.team_id = originating_team_id
self.is_member = is_member
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def __eq__(self, compare_str):
return compare_str == self.identifier
class SlackTeam(object):
"""
incomplete
Team object under which users and channels live.. Does lots.
"""
def __init__(self, eventrouter, token, team_hash, websocket_url, team_info, subteams, nick, myidentifier, my_manual_presence, users, bots, channels, **kwargs):
self.identifier = team_info["id"]
self.active = True
self.team_hash = team_hash
self.ws_url = websocket_url
self.connected = False
self.connecting_rtm = False
self.connecting_ws = False
self.ws = None
self.ws_counter = 0
self.ws_replies = {}
self.last_ping_time = 0
self.last_pong_time = time.time()
self.eventrouter = eventrouter
self.token = token
self.team = self
self.subteams = subteams
self.team_info = team_info
self.subdomain = team_info["domain"]
self.domain = self.subdomain + ".slack.com"
self.preferred_name = self.domain
self.nick = nick
self.myidentifier = myidentifier
self.my_manual_presence = my_manual_presence
        # self.channels may already exist if this object is being reused;
        # merge in any channels we don't know about yet.
        if getattr(self, "channels", None):
            for c in channels.keys():
                if not self.channels.get(c):
                    self.channels[c] = channels[c]
        else:
            self.channels = channels
self.users = users
self.bots = bots
self.name = self.domain
self.channel_buffer = None
self.got_history = True
self.create_buffer()
self.set_muted_channels(kwargs.get('muted_channels', ""))
self.set_highlight_words(kwargs.get('highlight_words', ""))
for c in self.channels.keys():
channels[c].set_related_server(self)
channels[c].check_should_open()
# Last step is to make sure my nickname is the set color
self.users[self.myidentifier].force_color(w.config_string(w.config_get('weechat.color.chat_nick_self')))
# This highlight step must happen after we have set related server
self.load_emoji_completions()
self.type = "team"
def __repr__(self):
return "domain={} nick={}".format(self.subdomain, self.nick)
def __eq__(self, compare_str):
return compare_str == self.token or compare_str == self.domain or compare_str == self.subdomain
@property
def members(self):
return self.users.keys()
def load_emoji_completions(self):
self.emoji_completions = list(EMOJI.keys())
if self.emoji_completions:
s = SlackRequest(self, "emoji.list")
self.eventrouter.receive(s)
def add_channel(self, channel):
self.channels[channel["id"]] = channel
channel.set_related_server(self)
def generate_usergroup_map(self):
return {s.handle: s.identifier for s in self.subteams.values()}
def create_buffer(self):
if not self.channel_buffer:
alias = config.server_aliases.get(self.subdomain)
if alias:
self.preferred_name = alias
elif config.short_buffer_names:
self.preferred_name = self.subdomain
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new(self.preferred_name, "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'server')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.nick)
w.buffer_set(self.channel_buffer, "localvar_set_server", self.preferred_name)
self.buffer_merge()
def buffer_merge(self, config_value=None):
if not config_value:
config_value = w.config_string(w.config_get('irc.look.server_buffer'))
if config_value == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
else:
w.buffer_unmerge(self.channel_buffer, 0)
def destroy_buffer(self, update_remote):
pass
def set_muted_channels(self, muted_str):
self.muted_channels = {x for x in muted_str.split(',') if x}
for channel in self.channels.values():
channel.set_highlights()
def set_highlight_words(self, highlight_str):
self.highlight_words = {x for x in highlight_str.split(',') if x}
for channel in self.channels.values():
channel.set_highlights()
def formatted_name(self, **kwargs):
return self.domain
def buffer_prnt(self, data, message=False):
tag_name = "team_message" if message else "team_info"
w.prnt_date_tags(self.channel_buffer, SlackTS().major, tag(tag_name), data)
def send_message(self, message, subtype=None, request_dict_ext={}):
w.prnt("", "ERROR: Sending a message in the team buffer is not supported")
def find_channel_by_members(self, members, channel_type=None):
for channel in self.channels.values():
if channel.get_members() == members and (
channel_type is None or channel.type == channel_type):
return channel
def get_channel_map(self):
return {v.name: k for k, v in self.channels.items()}
def get_username_map(self):
return {v.name: k for k, v in self.users.items()}
def get_team_hash(self):
return self.team_hash
@staticmethod
def generate_team_hash(team_id, subdomain):
return str(sha1_hex("{}{}".format(team_id, subdomain)))
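    # Illustrative example (not executed): the team hash is stable across
    # restarts because it depends only on the team id and subdomain, e.g.
    # generate_team_hash("T0AAAAAAA", "example") is the sha1 hex digest of
    # "T0AAAAAAAexample". ("T0AAAAAAA" is a made-up id for illustration.)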
def refresh(self):
self.rename()
def rename(self):
pass
    def is_user_present(self, user_id):
        user = self.users.get(user_id)
        return bool(user and user.presence == 'active')
def mark_read(self, ts=None, update_remote=True, force=False):
pass
def connect(self):
if not self.connected and not self.connecting_ws:
if self.ws_url:
self.connecting_ws = True
try:
# only http proxy is currently supported
proxy = ProxyWrapper()
                    if proxy.has_proxy:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs, http_proxy_host=proxy.proxy_address, http_proxy_port=proxy.proxy_port, http_proxy_auth=(proxy.proxy_user, proxy.proxy_password))
else:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs)
self.hook = w.hook_fd(ws.sock.fileno(), 1, 0, 0, "receive_ws_callback", self.get_team_hash())
ws.sock.setblocking(0)
self.ws = ws
self.set_reconnect_url(None)
self.set_connected()
self.connecting_ws = False
except:
w.prnt(self.channel_buffer,
'Failed connecting to slack team {}, retrying.'.format(self.domain))
dbg('connect failed with exception:\n{}'.format(format_exc_tb()), level=5)
self.connecting_ws = False
return False
elif not self.connecting_rtm:
# The fast reconnect failed, so start over-ish
for chan in self.channels:
self.channels[chan].got_history = False
s = initiate_connection(self.token, retries=999, team=self)
self.eventrouter.receive(s)
self.connecting_rtm = True
def set_connected(self):
self.connected = True
self.last_pong_time = time.time()
self.buffer_prnt('Connected to Slack team {} ({}) with username {}'.format(
self.team_info["name"], self.domain, self.nick))
dbg("connected to {}".format(self.domain))
def set_disconnected(self):
w.unhook(self.hook)
self.connected = False
def set_reconnect_url(self, url):
self.ws_url = url
def next_ws_transaction_id(self):
self.ws_counter += 1
return self.ws_counter
def send_to_websocket(self, data, expect_reply=True):
data["id"] = self.next_ws_transaction_id()
message = json.dumps(data)
try:
if expect_reply:
self.ws_replies[data["id"]] = data
self.ws.send(encode_to_utf8(message))
dbg("Sent {}...".format(message[:100]))
except (WebSocketConnectionClosedException, socket.error) as e:
handle_socket_error(e, self, 'send')
def update_member_presence(self, user, presence):
user.presence = presence
for c in self.channels:
c = self.channels[c]
if user.id in c.members:
c.update_nicklist(user.id)
def subscribe_users_presence(self):
# FIXME: There is a limitation in the API to the size of the
# json we can send.
# We should try to be smarter to fetch the users whom we want to
# subscribe to.
users = list(self.users.keys())[:750]
if self.myidentifier not in users:
users.append(self.myidentifier)
self.send_to_websocket({
"type": "presence_sub",
"ids": users,
}, expect_reply=False)
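    # Illustrative sketch (not executed): the frame sent above looks roughly
    # like {"type": "presence_sub", "ids": ["U0AAAAAAA", ...], "id": N},
    # where "id" is the transaction id that send_to_websocket adds and
    # "U0AAAAAAA" stands in for a real user id.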
class SlackChannelCommon(object):
def send_add_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.add", msg_id, reaction)
def send_remove_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.remove", msg_id, reaction)
def send_change_reaction(self, method, msg_id, reaction):
if type(msg_id) is not int:
if msg_id in self.hashed_messages:
timestamp = str(self.hashed_messages[msg_id])
else:
return
elif 0 < msg_id <= len(self.messages):
keys = self.main_message_keys_reversed()
timestamp = next(islice(keys, msg_id - 1, None))
else:
return
data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
s = SlackRequest(self.team, method, data, channel=self, metadata={'reaction': reaction})
self.eventrouter.receive(s)
def edit_nth_previous_message(self, msg_id, old, new, flags):
message = self.my_last_message(msg_id)
if message is None:
return
if new == "" and old == "":
s = SlackRequest(self.team, "chat.delete", {"channel": self.identifier, "ts": message['ts']}, channel=self)
self.eventrouter.receive(s)
else:
num_replace = 0 if 'g' in flags else 1
f = re.UNICODE
f |= re.IGNORECASE if 'i' in flags else 0
f |= re.MULTILINE if 'm' in flags else 0
f |= re.DOTALL if 's' in flags else 0
new_message = re.sub(old, new, message["text"], num_replace, f)
if new_message != message["text"]:
s = SlackRequest(self.team, "chat.update",
{"channel": self.identifier, "ts": message['ts'], "text": new_message}, channel=self)
self.eventrouter.receive(s)
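    # Illustrative examples for the substitution handling above (not executed):
    #
    #   2s/colour/color/   edit your 2nd most recent message, first match only
    #   s/foo/bar/gi       replace all matches, case-insensitively
    #   s///               an empty old and new pattern deletes the message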
def my_last_message(self, msg_id):
if type(msg_id) is not int:
ts = self.hashed_messages.get(msg_id)
m = self.messages.get(ts)
if m is not None and m.message_json.get("user") == self.team.myidentifier:
return m.message_json
else:
for key in self.main_message_keys_reversed():
m = self.messages[key]
if m.message_json.get("user") == self.team.myidentifier:
msg_id -= 1
if msg_id == 0:
return m.message_json
def change_message(self, ts, message_json=None, text=None):
ts = SlackTS(ts)
m = self.messages.get(ts)
if not m:
return
if message_json:
m.message_json.update(message_json)
if text:
m.change_text(text)
if type(m) == SlackMessage or config.thread_messages_in_channel:
new_text = self.render(m, force=True)
modify_buffer_line(self.channel_buffer, ts, new_text)
if type(m) == SlackThreadMessage:
thread_channel = m.parent_message.thread_channel
if thread_channel and thread_channel.active:
new_text = thread_channel.render(m, force=True)
modify_buffer_line(thread_channel.channel_buffer, ts, new_text)
def hash_message(self, ts):
ts = SlackTS(ts)
def calc_hash(ts):
return sha1_hex(str(ts))
if ts in self.messages and not self.messages[ts].hash:
message = self.messages[ts]
tshash = calc_hash(message.ts)
hl = 3
for i in range(hl, len(tshash) + 1):
shorthash = tshash[:i]
if self.hashed_messages.get(shorthash) == ts:
message.hash = shorthash
return shorthash
shorthash = tshash[:hl]
while any(x.startswith(shorthash) for x in self.hashed_messages):
hl += 1
shorthash = tshash[:hl]
if shorthash[:-1] in self.hashed_messages:
col_ts = self.hashed_messages.pop(shorthash[:-1])
col_new_hash = calc_hash(col_ts)[:hl]
self.hashed_messages[col_new_hash] = col_ts
col_msg = self.messages.get(col_ts)
if col_msg:
col_msg.hash = col_new_hash
self.change_message(str(col_msg.ts))
if col_msg.thread_channel:
col_msg.thread_channel.rename()
self.hashed_messages[shorthash] = message.ts
message.hash = shorthash
return shorthash
elif ts in self.messages:
return self.messages[ts].hash
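# Illustrative sketch of the collision handling in hash_message (not
# executed): hashes start at 3 hex chars and grow until unique. If a new
# message's hash collides at "abc", the hash grows to e.g. "abc9", and the
# older message holding "abc" is re-hashed to the same longer length and
# re-rendered, so every $-reference prefix stays unambiguous.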
class SlackChannel(SlackChannelCommon):
"""
Represents an individual slack channel.
"""
def __init__(self, eventrouter, **kwargs):
# We require these two things for a valid object,
# the rest we can just learn from slack
self.active = False
for key, value in kwargs.items():
setattr(self, key, value)
self.eventrouter = eventrouter
self.slack_name = kwargs["name"]
self.slack_purpose = kwargs.get("purpose", {"value": ""})
self.topic = kwargs.get("topic", {"value": ""})
self.identifier = kwargs["id"]
self.last_read = SlackTS(kwargs.get("last_read", SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team')
self.got_history = False
self.messages = OrderedDict()
self.hashed_messages = {}
self.thread_channels = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
self.set_name(self.slack_name)
# short name relates to the localvar we change for typing indication
self.current_short_name = self.name
self.set_members(kwargs.get('members', []))
self.unread_count_display = 0
self.last_line_from = None
    def __eq__(self, compare_str):
        return compare_str in (self.slack_name, self.formatted_name(),
                               self.formatted_name(style="long_default"))
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
@property
def muted(self):
return self.identifier in self.team.muted_channels
def set_name(self, slack_name):
self.name = "#" + slack_name
def refresh(self):
return self.rename()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(typing=self.is_someone_typing(), style="sidebar")
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def set_members(self, members):
self.members = set(members)
self.update_nicklist()
def get_members(self):
return self.members
def set_unread_count_display(self, count):
self.unread_count_display = count
self.new_messages = bool(self.unread_count_display)
if self.muted and config.muted_channels_activity != "all":
return
for c in range(self.unread_count_display):
if self.type in ["im", "mpim"]:
w.buffer_set(self.channel_buffer, "hotlist", "2")
else:
w.buffer_set(self.channel_buffer, "hotlist", "1")
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
elif self.type == "group" or self.type == "private":
prepend = config.group_name_prefix
elif self.type == "shared":
prepend = config.shared_name_prefix
else:
prepend = "#"
sidebar_color = config.color_buflist_muted_channels if self.muted else ""
select = {
"default": prepend + self.slack_name,
"sidebar": colorize_string(sidebar_color, prepend + self.slack_name),
"base": self.slack_name,
"long_default": "{}.{}{}".format(self.team.preferred_name, prepend, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return select[style]
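    # For example (illustrative, not executed): a muted private channel named
    # "ops" on team "acme" renders as "&ops" in the "sidebar" style (dimmed
    # via color_buflist_muted_channels) and "acme.&ops" in "long_default",
    # assuming the default group_name_prefix of "&".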
def render_topic(self, fallback_to_purpose=False):
topic = self.topic['value']
if not topic and fallback_to_purpose:
topic = self.slack_purpose['value']
return unhtmlescape(unfurl_refs(topic))
def set_topic(self, value=None):
if value is not None:
self.topic = {"value": value}
if self.channel_buffer:
topic = self.render_topic(fallback_to_purpose=True)
w.buffer_set(self.channel_buffer, "title", topic)
def update_from_message_json(self, message_json):
for key, value in message_json.items():
setattr(self, key, value)
def open(self, update_remote=True):
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["join"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
self.create_buffer()
self.active = True
self.get_history()
def check_should_open(self, force=False):
if hasattr(self, "is_archived") and self.is_archived:
return
if force:
self.create_buffer()
return
# Only check is_member if is_open is not set, because in some cases
# (e.g. group DMs), is_member should be ignored in favor of is_open.
is_open = self.is_open if hasattr(self, "is_open") else self.is_member
if is_open or self.unread_count_display:
self.create_buffer()
if config.background_load_all_history:
self.get_history(slow_queue=True)
def set_related_server(self, team):
self.team = team
def highlights(self):
nick_highlights = {'@' + self.team.nick, self.team.myidentifier}
subteam_highlights = {subteam.handle for subteam in self.team.subteams.values()
if subteam.is_member}
highlights = nick_highlights | subteam_highlights | self.team.highlight_words
if self.muted and config.muted_channels_activity == "personal_highlights":
return highlights
else:
return highlights | {"@channel", "@everyone", "@group", "@here"}
def set_highlights(self):
# highlight my own name and any set highlights
if self.channel_buffer:
h_str = ",".join(self.highlights())
w.buffer_set(self.channel_buffer, "highlight_words", h_str)
if self.muted and config.muted_channels_activity != "all":
notify_level = "0" if config.muted_channels_activity == "none" else "1"
w.buffer_set(self.channel_buffer, "notify", notify_level)
else:
w.buffer_set(self.channel_buffer, "notify", "3")
if self.muted and config.muted_channels_activity == "none":
w.buffer_set(self.channel_buffer, "highlight_tags_restrict", "highlight_force")
else:
w.buffer_set(self.channel_buffer, "highlight_tags_restrict", "")
for thread_channel in self.thread_channels.values():
thread_channel.set_highlights(h_str)
def create_buffer(self):
"""
Creates the weechat buffer where the channel magic happens.
"""
if not self.channel_buffer:
self.active = True
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
if self.type == "im":
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
else:
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.set_highlights()
self.set_topic()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
self.update_nicklist()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["info"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
if self.type == "im":
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["join"],
{"users": self.user, "return_im": True}, channel=self)
self.eventrouter.receive(s)
def clear_messages(self):
w.buffer_clear(self.channel_buffer)
self.messages = OrderedDict()
self.got_history = False
def destroy_buffer(self, update_remote):
self.clear_messages()
self.channel_buffer = None
self.active = False
if update_remote and not self.eventrouter.shutting_down:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["leave"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
    def buffer_prnt(self, nick, text, timestamp=None, tagset=None, tag_nick=None, history_message=False, extra_tags=None):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
last_read = SlackTS(self.last_read)
# without this, DMs won't open automatically
if not self.channel_buffer and ts > last_read:
self.open(update_remote=False)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
backlog = ts <= last_read
if not backlog:
self.new_messages = True
if not tagset:
if self.type in ["im", "mpim"]:
tagset = "dm"
else:
tagset = "channel"
no_log = history_message and backlog
self_msg = tag_nick == self.team.nick
tags = tag(tagset, user=tag_nick, self_msg=self_msg, backlog=backlog, no_log=no_log, extra_tags=extra_tags)
try:
if (config.unhide_buffers_with_activity
and not self.is_visible() and not self.muted):
w.buffer_set(self.channel_buffer, "hidden", "0")
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if backlog or self_msg:
self.mark_read(ts, update_remote=False, force=True)
except:
dbg("Problem processing buffer_prnt")
def send_message(self, message, subtype=None, request_dict_ext={}):
message = linkify_text(message, self.team)
dbg(message)
if subtype == 'me_message':
s = SlackRequest(self.team, "chat.meMessage", {"channel": self.identifier, "text": message}, channel=self)
self.eventrouter.receive(s)
else:
request = {"type": "message", "channel": self.identifier,
"text": message, "user": self.team.myidentifier}
request.update(request_dict_ext)
self.team.send_to_websocket(request)
def store_message(self, message, team, from_me=False):
if not self.active:
return
if from_me:
message.message_json["user"] = team.myidentifier
self.messages[SlackTS(message.ts)] = message
sorted_messages = sorted(self.messages.items())
messages_to_delete = sorted_messages[:-SCROLLBACK_SIZE]
messages_to_keep = sorted_messages[-SCROLLBACK_SIZE:]
for message_hash in [m[1].hash for m in messages_to_delete]:
if message_hash in self.hashed_messages:
del self.hashed_messages[message_hash]
self.messages = OrderedDict(messages_to_keep)
def is_visible(self):
return w.buffer_get_integer(self.channel_buffer, "hidden") == 0
def get_history(self, slow_queue=False):
if not self.got_history:
# we have probably reconnected. flush the buffer
if self.team.connected:
self.clear_messages()
w.prnt_date_tags(self.channel_buffer, SlackTS().major,
tag(backlog=True, no_log=True), '\tgetting channel history...')
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["history"],
{"channel": self.identifier, "count": BACKLOG_SIZE}, channel=self, metadata={'clear': True})
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
def main_message_keys_reversed(self):
return (key for key in reversed(self.messages)
if type(self.messages[key]) == SlackMessage)
# Typing related
def set_typing(self, user):
if self.channel_buffer and self.is_visible():
self.typing[user] = time.time()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def unset_typing(self, user):
if self.channel_buffer and self.is_visible():
u = self.typing.get(user)
if u:
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def is_someone_typing(self):
"""
Walks through dict of typing folks in a channel and fast
returns if any of them is actively typing. If none are,
nulls the dict and returns false.
"""
for user, timestamp in self.typing.items():
if timestamp + 4 > time.time():
return True
if len(self.typing) > 0:
self.typing = {}
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
return False
    def get_typing_list(self):
        """
        Returns the names of everyone in the channel who is currently typing.
        """
        typing = []
        # iterate over a copy since we delete stale entries as we go
        for user, timestamp in list(self.typing.items()):
            if timestamp + 4 > time.time():
                typing.append(user)
            else:
                del self.typing[user]
        return typing
def mark_read(self, ts=None, update_remote=True, force=False):
if self.new_messages or force:
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
if not ts:
ts = next(reversed(self.messages), SlackTS())
if ts > self.last_read:
self.last_read = ts
if update_remote:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["mark"],
{"channel": self.identifier, "ts": ts}, channel=self)
self.eventrouter.receive(s)
self.new_messages = False
def user_joined(self, user_id):
# ugly hack - for some reason this gets turned into a list
self.members = set(self.members)
self.members.add(user_id)
self.update_nicklist(user_id)
def user_left(self, user_id):
self.members.discard(user_id)
self.update_nicklist(user_id)
def update_nicklist(self, user=None):
if not self.channel_buffer:
return
if self.type not in ["channel", "group", "mpim", "private", "shared"]:
return
w.buffer_set(self.channel_buffer, "nicklist", "1")
# create nicklists for the current channel if they don't exist
# if they do, use the existing pointer
here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
if not here:
here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
if not afk:
afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
# Add External nicklist group only for shared channels
if self.type == 'shared':
external = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL)
if not external:
external = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL, 'weechat.color.nicklist_group', 2)
if user and len(self.members) < 1000:
user = self.team.users.get(user)
# External users that have left shared channels won't exist
if not user or user.deleted:
return
nick = w.nicklist_search_nick(self.channel_buffer, "", user.name)
# since this is a change just remove it regardless of where it is
w.nicklist_remove_nick(self.channel_buffer, nick)
# now add it back in to whichever..
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
if user.identifier in self.members:
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
# if we didn't get a user, build a complete list. this is expensive.
else:
if len(self.members) < 1000:
try:
for user in self.members:
user = self.team.users.get(user)
if user.deleted:
continue
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
except:
dbg("DEBUG: {} {} {}".format(self.identifier, self.name, format_exc_only()))
else:
w.nicklist_remove_all(self.channel_buffer)
for fn in ["1| too", "2| many", "3| users", "4| to", "5| show"]:
w.nicklist_add_group(self.channel_buffer, '', fn, w.color('white'), 1)
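    # Note: the numeric "N| " prefixes above exist only to make weechat sort
    # the placeholder nicklist groups in reading order.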
def render(self, message, force=False):
text = message.render(force)
if isinstance(message, SlackThreadMessage):
thread_id = message.parent_message.hash or message.parent_message.ts
return colorize_string(get_thread_color(thread_id), '[{}]'.format(thread_id)) + ' {}'.format(text)
return text
class SlackDMChannel(SlackChannel):
"""
Subclass of a normal channel for person-to-person communication, which
has some important differences.
"""
def __init__(self, eventrouter, users, **kwargs):
dmuser = kwargs["user"]
kwargs["name"] = users[dmuser].name if dmuser in users else dmuser
super(SlackDMChannel, self).__init__(eventrouter, **kwargs)
self.type = 'im'
self.update_color()
self.set_name(self.slack_name)
if dmuser in users:
self.set_topic(create_user_status_string(users[dmuser].profile))
def set_related_server(self, team):
super(SlackDMChannel, self).set_related_server(team)
if self.user not in self.team.users:
s = SlackRequest(self.team, 'users.info', {'user': self.slack_name}, channel=self)
self.eventrouter.receive(s)
def set_name(self, slack_name):
self.name = slack_name
def get_members(self):
return {self.user}
def create_buffer(self):
if not self.channel_buffer:
super(SlackDMChannel, self).create_buffer()
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
def update_color(self):
if config.colorize_private_chats:
self.color_name = get_nick_color(self.name)
else:
self.color_name = ""
def formatted_name(self, style="default", typing=False, present=True, enable_color=False, **kwargs):
prepend = ""
if config.show_buflist_presence:
prepend = "+" if present else " "
select = {
"default": self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}".format(self.team.preferred_name, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
if config.colorize_private_chats and enable_color:
return colorize_string(self.color_name, select[style])
else:
return select[style]
def open(self, update_remote=True):
self.create_buffer()
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["info"],
{"name": self.identifier}, channel=self)
self.eventrouter.receive(s)
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["join"],
{"users": self.user, "return_im": True}, channel=self)
self.eventrouter.receive(s)
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(style="sidebar", present=self.team.is_user_present(self.user), enable_color=config.colorize_private_chats)
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def refresh(self):
return self.rename()
class SlackGroupChannel(SlackChannel):
"""
A group channel is a private discussion group.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackGroupChannel, self).__init__(eventrouter, **kwargs)
self.type = "group"
self.set_name(self.slack_name)
def set_name(self, slack_name):
self.name = config.group_name_prefix + slack_name
class SlackPrivateChannel(SlackGroupChannel):
"""
A private channel is a private discussion group. At the time of writing, it
differs from group channels in that group channels are channels initially
created as private, while private channels are public channels which are
later converted to private.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackPrivateChannel, self).__init__(eventrouter, **kwargs)
self.type = "private"
def set_related_server(self, team):
super(SlackPrivateChannel, self).set_related_server(team)
# Fetch members here (after the team is known) since they aren't
# included in rtm.start
s = SlackRequest(team, 'conversations.members', {'channel': self.identifier}, channel=self)
self.eventrouter.receive(s)
class SlackMPDMChannel(SlackChannel):
"""
An MPDM channel is a special instance of a 'group' channel.
We change the name to look less terrible in weechat.
"""
def __init__(self, eventrouter, team_users, myidentifier, **kwargs):
kwargs["name"] = ','.join(sorted(
getattr(team_users.get(user_id), 'name', user_id)
for user_id in kwargs["members"]
if user_id != myidentifier
))
super(SlackMPDMChannel, self).__init__(eventrouter, **kwargs)
self.type = "mpim"
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]["info"],
{"channel": self.identifier}, channel=self)
self.eventrouter.receive(s)
if update_remote and 'join' in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team, SLACK_API_TRANSLATOR[self.type]['join'],
{'users': ','.join(self.members)}, channel=self)
self.eventrouter.receive(s)
def set_name(self, slack_name):
self.name = slack_name
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
else:
prepend = "@"
select = {
"default": self.name,
"sidebar": prepend + self.name,
"base": self.name,
"long_default": "{}.{}".format(self.team.preferred_name, self.name),
"long_base": "{}.{}".format(self.team.preferred_name, self.name),
}
return select[style]
def rename(self):
pass
class SlackSharedChannel(SlackChannel):
def __init__(self, eventrouter, **kwargs):
super(SlackSharedChannel, self).__init__(eventrouter, **kwargs)
self.type = 'shared'
def set_related_server(self, team):
super(SlackSharedChannel, self).set_related_server(team)
# Fetch members here (after the team is known) since they aren't
# included in rtm.start
s = SlackRequest(team, 'conversations.members', {'channel': self.identifier}, channel=self)
self.eventrouter.receive(s)
def get_history(self, slow_queue=False):
# Get info for external users in the channel
for user in self.members - set(self.team.users.keys()):
s = SlackRequest(self.team, 'users.info', {'user': user}, channel=self)
self.eventrouter.receive(s)
super(SlackSharedChannel, self).get_history(slow_queue)
def set_name(self, slack_name):
self.name = config.shared_name_prefix + slack_name
class SlackThreadChannel(SlackChannelCommon):
"""
A thread channel is a virtual channel. We don't inherit from
SlackChannel, because most of how it operates will be different.
"""
def __init__(self, eventrouter, parent_message):
self.eventrouter = eventrouter
self.parent_message = parent_message
self.hashed_messages = {}
self.channel_buffer = None
self.type = "thread"
self.got_history = False
self.label = None
self.members = self.parent_message.channel.members
self.team = self.parent_message.team
self.last_line_from = None
@property
def identifier(self):
return self.parent_message.channel.identifier
@property
def messages(self):
return self.parent_message.channel.messages
@property
def muted(self):
return self.parent_message.channel.muted
def formatted_name(self, style="default", **kwargs):
hash_or_ts = self.parent_message.hash or self.parent_message.ts
styles = {
"default": " +{}".format(hash_or_ts),
"long_default": "{}.{}".format(self.parent_message.channel.formatted_name(style="long_default"), hash_or_ts),
"sidebar": " +{}".format(hash_or_ts),
}
return styles[style]
def refresh(self):
self.rename()
def mark_read(self, ts=None, update_remote=True, force=False):
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
def buffer_prnt(self, nick, text, timestamp, tag_nick=None):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
if self.channel_buffer:
if self.parent_message.channel.type in ["im", "mpim"]:
tagset = "dm"
else:
tagset = "channel"
self_msg = tag_nick == self.team.nick
tags = tag(tagset, user=tag_nick, self_msg=self_msg)
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if self_msg:
self.mark_read(ts, update_remote=False, force=True)
def get_history(self):
self.got_history = True
for message in chain([self.parent_message], self.parent_message.submessages):
text = self.render(message)
self.buffer_prnt(message.sender, text, message.ts, tag_nick=message.sender_plain)
if len(self.parent_message.submessages) < self.parent_message.number_of_replies():
s = SlackRequest(self.team, "conversations.replies",
{"channel": self.identifier, "ts": self.parent_message.ts},
channel=self.parent_message.channel)
self.eventrouter.receive(s)
def main_message_keys_reversed(self):
return (message.ts for message in reversed(self.parent_message.submessages))
def send_message(self, message, subtype=None, request_dict_ext={}):
if subtype == 'me_message':
w.prnt("", "ERROR: /me is not supported in threads")
return w.WEECHAT_RC_ERROR
message = linkify_text(message, self.team)
dbg(message)
request = {"type": "message", "text": message,
"channel": self.parent_message.channel.identifier,
"thread_ts": str(self.parent_message.ts),
"user": self.team.myidentifier}
request.update(request_dict_ext)
self.team.send_to_websocket(request)
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
def rename(self):
if self.channel_buffer and not self.label:
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
def set_highlights(self, highlight_string=None):
if self.channel_buffer:
if highlight_string is None:
highlight_string = ",".join(self.parent_message.channel.highlights())
w.buffer_set(self.channel_buffer, "highlight_words", highlight_string)
def create_buffer(self):
"""
Creates the weechat buffer where the thread magic happens.
"""
if not self.channel_buffer:
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.set_highlights()
time_format = w.config_string(w.config_get("weechat.look.buffer_time_format"))
parent_time = time.localtime(SlackTS(self.parent_message.ts).major)
            topic = '{} {} | {}'.format(time.strftime(time_format, parent_time), self.parent_message.sender, self.render(self.parent_message))
w.buffer_set(self.channel_buffer, "title", topic)
# self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def destroy_buffer(self, update_remote):
self.channel_buffer = None
self.got_history = False
self.active = False
def render(self, message, force=False):
return message.render(force)
class SlackUser(object):
"""
Represends an individual slack user. Also where you set their name formatting.
"""
def __init__(self, originating_team_id, **kwargs):
self.identifier = kwargs["id"]
# These attributes may be missing in the response, so we have to make
# sure they're set
self.profile = {}
self.presence = kwargs.get("presence", "unknown")
self.deleted = kwargs.get("deleted", False)
self.is_external = (not kwargs.get("is_bot") and
kwargs.get("team_id") != originating_team_id)
for key, value in kwargs.items():
setattr(self, key, value)
self.name = nick_from_profile(self.profile, kwargs["name"])
self.username = kwargs["name"]
self.update_color()
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def force_color(self, color_name):
self.color_name = color_name
def update_color(self):
# This will automatically be none/"" if the user has disabled nick
# colourization.
self.color_name = get_nick_color(self.name)
def update_status(self, status_emoji, status_text):
self.profile["status_emoji"] = status_emoji
self.profile["status_text"] = status_text
def formatted_name(self, prepend="", enable_color=True):
name = prepend + self.name
if enable_color:
return colorize_string(self.color_name, name)
else:
return name
class SlackBot(SlackUser):
"""
Basically the same as a user, but split out to identify and for future
needs
"""
def __init__(self, originating_team_id, **kwargs):
super(SlackBot, self).__init__(originating_team_id, is_bot=True, **kwargs)
class SlackMessage(object):
"""
Represents a single slack message and associated context/metadata.
These are modifiable and can be rerendered to change a message,
delete a message, add a reaction, add a thread.
Note: these can't be tied to a SlackUser object because users
can be deleted, so we have to store sender in each one.
"""
def __init__(self, message_json, team, channel, override_sender=None):
self.team = team
self.channel = channel
self.message_json = message_json
self.submessages = []
self.hash = None
if override_sender:
self.sender = override_sender
self.sender_plain = override_sender
else:
senders = self.get_sender()
self.sender, self.sender_plain = senders[0], senders[1]
self.ts = SlackTS(message_json['ts'])
def __hash__(self):
return hash(self.ts)
@property
def thread_channel(self):
return self.channel.thread_channels.get(self.ts)
def open_thread(self, switch=False):
if not self.thread_channel or not self.thread_channel.active:
self.channel.thread_channels[self.ts] = SlackThreadChannel(EVENTROUTER, self)
self.thread_channel.open()
if switch:
w.buffer_set(self.thread_channel.channel_buffer, "display", "1")
def render(self, force=False):
# If we already have a rendered version in the object, just return that.
if not force and self.message_json.get("_rendered_text"):
return self.message_json["_rendered_text"]
if "fallback" in self.message_json:
text = self.message_json["fallback"]
elif self.message_json.get("text"):
text = self.message_json["text"]
else:
text = ""
if self.message_json.get('mrkdwn', True):
text = render_formatting(text)
if (self.message_json.get('subtype') in ('channel_join', 'group_join') and
self.message_json.get('inviter')):
inviter_id = self.message_json.get('inviter')
text += " by invitation from <@{}>".format(inviter_id)
if "blocks" in self.message_json:
text += unfurl_blocks(self.message_json)
text = unfurl_refs(text)
if (self.message_json.get('subtype') == 'me_message' and
not self.message_json['text'].startswith(self.sender)):
text = "{} {}".format(self.sender, text)
if "edited" in self.message_json:
text += " " + colorize_string(config.color_edited_suffix, '(edited)')
text += unfurl_refs(unwrap_attachments(self.message_json, text))
text += unfurl_refs(unwrap_files(self.message_json, text))
text = unhtmlescape(text.lstrip().replace("\t", " "))
text += create_reactions_string(
self.message_json.get("reactions", ""), self.team.myidentifier)
if self.number_of_replies():
self.channel.hash_message(self.ts)
text += " " + colorize_string(get_thread_color(self.hash), "[ Thread: {} Replies: {} ]".format(
self.hash, self.number_of_replies()))
text = replace_string_with_emoji(text)
self.message_json["_rendered_text"] = text
return text
def change_text(self, new_text):
self.message_json["text"] = new_text
dbg(self.message_json)
def get_sender(self):
name = ""
name_plain = ""
user = self.team.users.get(self.message_json.get('user'))
if user:
name = "{}".format(user.formatted_name())
name_plain = "{}".format(user.formatted_name(enable_color=False))
if user.is_external:
name += config.external_user_suffix
name_plain += config.external_user_suffix
elif 'username' in self.message_json:
username = self.message_json["username"]
if self.message_json.get("subtype") == "bot_message":
name = "{} :]".format(username)
name_plain = "{}".format(username)
else:
name = "-{}-".format(username)
name_plain = "{}".format(username)
elif 'service_name' in self.message_json:
name = "-{}-".format(self.message_json["service_name"])
name_plain = "{}".format(self.message_json["service_name"])
elif self.message_json.get('bot_id') in self.team.bots:
name = "{} :]".format(self.team.bots[self.message_json["bot_id"]].formatted_name())
name_plain = "{}".format(self.team.bots[self.message_json["bot_id"]].formatted_name(enable_color=False))
return (name, name_plain)
def add_reaction(self, reaction, user):
m = self.message_json.get('reactions')
if m:
found = False
for r in m:
if r["name"] == reaction and user not in r["users"]:
r["users"].append(user)
found = True
if not found:
self.message_json["reactions"].append({"name": reaction, "users": [user]})
else:
self.message_json["reactions"] = [{"name": reaction, "users": [user]}]
def remove_reaction(self, reaction, user):
m = self.message_json.get('reactions')
if m:
for r in m:
if r["name"] == reaction and user in r["users"]:
r["users"].remove(user)
def has_mention(self):
return w.string_has_highlight(unfurl_refs(self.message_json.get('text')),
",".join(self.channel.highlights()))
def number_of_replies(self):
return max(len(self.submessages), len(self.message_json.get("replies", [])))
def notify_thread(self, action=None, sender_id=None):
if config.auto_open_threads:
self.open_thread()
elif sender_id != self.team.myidentifier:
if action == "mention":
template = "You were mentioned in thread {hash}, channel {channel}"
elif action == "participant":
template = "New message in thread {hash}, channel {channel} in which you participated"
elif action == "response":
template = "New message in thread {hash} in response to own message in {channel}"
else:
template = "Notification for message in thread {hash}, channel {channel}"
message = template.format(hash=self.hash, channel=self.channel.formatted_name())
self.team.buffer_prnt(message, message=True)
class SlackThreadMessage(SlackMessage):
def __init__(self, parent_message, *args):
super(SlackThreadMessage, self).__init__(*args)
self.parent_message = parent_message
class Hdata(object):
def __init__(self, w):
self.buffer = w.hdata_get('buffer')
self.line = w.hdata_get('line')
self.line_data = w.hdata_get('line_data')
self.lines = w.hdata_get('lines')
class SlackTS(object):
def __init__(self, ts=None):
if ts:
self.major, self.minor = [int(x) for x in ts.split('.', 1)]
else:
self.major = int(time.time())
self.minor = 0
def __cmp__(self, other):
if isinstance(other, SlackTS):
if self.major < other.major:
return -1
elif self.major > other.major:
return 1
elif self.major == other.major:
if self.minor < other.minor:
return -1
elif self.minor > other.minor:
return 1
else:
return 0
elif isinstance(other, str):
s = self.__str__()
if s < other:
return -1
elif s > other:
return 1
elif s == other:
return 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __hash__(self):
return hash("{}.{}".format(self.major, self.minor))
def __repr__(self):
return str("{0}.{1:06d}".format(self.major, self.minor))
def split(self, *args, **kwargs):
return [self.major, self.minor]
def majorstr(self):
return str(self.major)
def minorstr(self):
return str(self.minor)
###### New handlers
def handle_rtmstart(login_data, eventrouter, team, channel, metadata):
"""
    Handles the main entry call to Slack, rtm.start.
"""
metadata = login_data["wee_slack_request_metadata"]
if not login_data["ok"]:
w.prnt("", "ERROR: Failed connecting to Slack with token {}: {}"
.format(token_for_print(metadata.token), login_data["error"]))
if not re.match(r"^xo\w\w(-\d+){3}-[0-9a-f]+$", metadata.token):
w.prnt("", "ERROR: Token does not look like a valid Slack token. "
"Ensure it is a valid token and not just a OAuth code.")
return
self_profile = next(
user["profile"]
for user in login_data["users"]
if user["id"] == login_data["self"]["id"]
)
self_nick = nick_from_profile(self_profile, login_data["self"]["name"])
# Let's reuse a team if we have it already.
th = SlackTeam.generate_team_hash(login_data['team']['id'], login_data['team']['domain'])
if not eventrouter.teams.get(th):
users = {}
for item in login_data["users"]:
users[item["id"]] = SlackUser(login_data['team']['id'], **item)
bots = {}
for item in login_data["bots"]:
bots[item["id"]] = SlackBot(login_data['team']['id'], **item)
subteams = {}
for item in login_data["subteams"]["all"]:
is_member = item['id'] in login_data["subteams"]["self"]
subteams[item['id']] = SlackSubteam(
login_data['team']['id'], is_member=is_member, **item)
channels = {}
for item in login_data["channels"]:
if item["is_shared"]:
channels[item["id"]] = SlackSharedChannel(eventrouter, **item)
elif item["is_private"]:
channels[item["id"]] = SlackPrivateChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackChannel(eventrouter, **item)
for item in login_data["ims"]:
channels[item["id"]] = SlackDMChannel(eventrouter, users, **item)
for item in login_data["groups"]:
if item["is_mpim"]:
channels[item["id"]] = SlackMPDMChannel(eventrouter, users, login_data["self"]["id"], **item)
else:
channels[item["id"]] = SlackGroupChannel(eventrouter, **item)
t = SlackTeam(
eventrouter,
metadata.token,
th,
login_data['url'],
login_data["team"],
subteams,
self_nick,
login_data["self"]["id"],
login_data["self"]["manual_presence"],
users,
bots,
channels,
muted_channels=login_data["self"]["prefs"]["muted_channels"],
highlight_words=login_data["self"]["prefs"]["highlight_words"],
)
eventrouter.register_team(t)
else:
t = eventrouter.teams.get(th)
if t.myidentifier != login_data["self"]["id"]:
print_error(
'The Slack team {} has tokens for two different users, this is not supported. The '
'token {} is for user {}, and the token {} is for user {}. Please remove one of '
'them.'.format(t.team_info["name"], token_for_print(t.token), t.nick,
token_for_print(metadata.token), self_nick)
)
return
elif metadata.metadata.get('initial_connection'):
print_error(
'Ignoring duplicate Slack tokens for the same team ({}) and user ({}). The two '
'tokens are {} and {}.'.format(t.team_info["name"], t.nick,
token_for_print(t.token), token_for_print(metadata.token)),
warning=True
)
return
else:
t.set_reconnect_url(login_data['url'])
t.connecting_rtm = False
t.connect()
def handle_rtmconnect(login_data, eventrouter, team, channel, metadata):
metadata = login_data["wee_slack_request_metadata"]
team = metadata.team
team.connecting_rtm = False
if not login_data["ok"]:
w.prnt("", "ERROR: Failed reconnecting to Slack with token {}: {}"
.format(token_for_print(metadata.token), login_data["error"]))
return
team.set_reconnect_url(login_data['url'])
team.connect()
def handle_emojilist(emoji_json, eventrouter, team, channel, metadata):
if emoji_json["ok"]:
team.emoji_completions.extend(emoji_json["emoji"].keys())
def handle_channelsinfo(channel_json, eventrouter, team, channel, metadata):
channel.set_unread_count_display(channel_json['channel'].get('unread_count_display', 0))
channel.set_members(channel_json['channel']['members'])
def handle_groupsinfo(group_json, eventrouter, team, channel, metadata):
channel.set_unread_count_display(group_json['group'].get('unread_count_display', 0))
def handle_conversationsopen(conversation_json, eventrouter, team, channel, metadata, object_name='channel'):
# Set unread count if the channel isn't new
if channel:
unread_count_display = conversation_json[object_name].get('unread_count_display', 0)
channel.set_unread_count_display(unread_count_display)
def handle_mpimopen(mpim_json, eventrouter, team, channel, metadata, object_name='group'):
handle_conversationsopen(mpim_json, eventrouter, team, channel, metadata, object_name)
def handle_history(message_json, eventrouter, team, channel, metadata):
if metadata['clear']:
channel.clear_messages()
channel.got_history = True
for message in reversed(message_json["messages"]):
process_message(message, eventrouter, team, channel, metadata, history_message=True)
handle_channelshistory = handle_history
handle_conversationshistory = handle_history
handle_groupshistory = handle_history
handle_imhistory = handle_history
handle_mpimhistory = handle_history
def handle_conversationsreplies(message_json, eventrouter, team, channel, metadata):
for message in message_json['messages']:
process_message(message, eventrouter, team, channel, metadata)
def handle_conversationsmembers(members_json, eventrouter, team, channel, metadata):
if members_json['ok']:
channel.set_members(members_json['members'])
else:
w.prnt(team.channel_buffer, '{}Couldn\'t load members for channel {}. Error: {}'
.format(w.prefix('error'), channel.name, members_json['error']))
def handle_usersinfo(user_json, eventrouter, team, channel, metadata):
user_info = user_json['user']
if not metadata.get('user'):
user = SlackUser(team.identifier, **user_info)
team.users[user_info['id']] = user
if channel.type == 'shared':
channel.update_nicklist(user_info['id'])
elif channel.type == 'im':
channel.slack_name = user.name
channel.set_topic(create_user_status_string(user.profile))
def handle_usergroupsuserslist(users_json, eventrouter, team, channel, metadata):
header = 'Users in {}'.format(metadata['usergroup_handle'])
users = [team.users[key] for key in users_json['users']]
return print_users_info(team, header, users)
def handle_usersprofileset(json, eventrouter, team, channel, metadata):
if not json['ok']:
w.prnt('', 'ERROR: Failed to set profile: {}'.format(json['error']))
def handle_conversationsinvite(json, eventrouter, team, channel, metadata):
nicks = ', '.join(metadata['nicks'])
if json['ok']:
w.prnt(team.channel_buffer, 'Invited {} to {}'.format(nicks, channel.name))
else:
w.prnt(team.channel_buffer, 'ERROR: Couldn\'t invite {} to {}. Error: {}'
.format(nicks, channel.name, json['error']))
def handle_chatcommand(json, eventrouter, team, channel, metadata):
command = '{} {}'.format(metadata['command'], metadata['command_args']).rstrip()
response = unfurl_refs(json['response']) if 'response' in json else ''
if json['ok']:
response_text = 'Response: {}'.format(response) if response else 'No response'
        w.prnt(team.channel_buffer, 'Ran command "{}". {}'.format(command, response_text))
else:
response_text = '. Response: {}'.format(response) if response else ''
w.prnt(team.channel_buffer, 'ERROR: Couldn\'t run command "{}". Error: {}{}'
.format(command, json['error'], response_text))
def handle_reactionsadd(json, eventrouter, team, channel, metadata):
if not json['ok']:
print_error("Couldn't add reaction {}: {}".format(metadata['reaction'], json['error']))
def handle_reactionsremove(json, eventrouter, team, channel, metadata):
if not json['ok']:
print_error("Couldn't remove reaction {}: {}".format(metadata['reaction'], json['error']))
###### New/converted process_ and subprocess_ methods
def process_hello(message_json, eventrouter, team, channel, metadata):
team.subscribe_users_presence()
def process_reconnect_url(message_json, eventrouter, team, channel, metadata):
team.set_reconnect_url(message_json['url'])
def process_presence_change(message_json, eventrouter, team, channel, metadata):
users = [team.users[user_id] for user_id in message_json.get("users", [])]
if "user" in metadata:
users.append(metadata["user"])
for user in users:
team.update_member_presence(user, message_json["presence"])
if team.myidentifier in users:
w.bar_item_update("away")
w.bar_item_update("slack_away")
def process_manual_presence_change(message_json, eventrouter, team, channel, metadata):
team.my_manual_presence = message_json["presence"]
w.bar_item_update("away")
w.bar_item_update("slack_away")
def process_pref_change(message_json, eventrouter, team, channel, metadata):
if message_json['name'] == 'muted_channels':
team.set_muted_channels(message_json['value'])
elif message_json['name'] == 'highlight_words':
team.set_highlight_words(message_json['value'])
else:
dbg("Preference change not implemented: {}\n".format(message_json['name']))
def process_user_change(message_json, eventrouter, team, channel, metadata):
"""
    Currently only used to update a user's status, though there is more we could do here.
"""
user = metadata['user']
profile = message_json['user']['profile']
if user:
user.update_status(profile.get('status_emoji'), profile.get('status_text'))
dmchannel = team.find_channel_by_members({user.identifier}, channel_type='im')
if dmchannel:
dmchannel.set_topic(create_user_status_string(profile))
def process_user_typing(message_json, eventrouter, team, channel, metadata):
if channel:
channel.set_typing(metadata["user"].name)
w.bar_item_update("slack_typing_notice")
def process_team_join(message_json, eventrouter, team, channel, metadata):
user = message_json['user']
team.users[user["id"]] = SlackUser(team.identifier, **user)
def process_pong(message_json, eventrouter, team, channel, metadata):
team.last_pong_time = time.time()
def process_message(message_json, eventrouter, team, channel, metadata, history_message=False):
if SlackTS(message_json["ts"]) in channel.messages:
return
if "thread_ts" in message_json and "reply_count" not in message_json and "subtype" not in message_json:
if message_json.get("reply_broadcast"):
message_json["subtype"] = "thread_broadcast"
else:
message_json["subtype"] = "thread_message"
subtype = message_json.get("subtype")
subtype_functions = get_functions_with_prefix("subprocess_")
if subtype in subtype_functions:
subtype_functions[subtype](message_json, eventrouter, team, channel, history_message)
else:
message = SlackMessage(message_json, team, channel)
channel.store_message(message, team)
text = channel.render(message)
dbg("Rendered message: %s" % text)
dbg("Sender: %s (%s)" % (message.sender, message.sender_plain))
if subtype == 'me_message':
prefix = w.prefix("action").rstrip()
else:
prefix = message.sender
channel.buffer_prnt(prefix, text, message.ts, tag_nick=message.sender_plain, history_message=history_message)
channel.unread_count_display += 1
dbg("NORMAL REPLY {}".format(message_json))
if not history_message:
download_files(message_json, team)
def download_files(message_json, team):
download_location = config.files_download_location
if not download_location:
return
download_location = w.string_eval_path_home(download_location, {}, {}, {})
if not os.path.exists(download_location):
try:
os.makedirs(download_location)
        except Exception:
w.prnt('', 'ERROR: Failed to create directory at files_download_location: {}'
.format(format_exc_only()))
def fileout_iter(path):
yield path
main, ext = os.path.splitext(path)
for i in count(start=1):
yield main + "-{}".format(i) + ext
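    # e.g. fileout_iter('/tmp/img.png') yields '/tmp/img.png', '/tmp/img-1.png',
    # '/tmp/img-2.png', ... until the loop below finds an unused filename.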
for f in message_json.get('files', []):
if f.get('mode') == 'tombstone':
continue
filetype = '' if f['title'].endswith(f['filetype']) else '.' + f['filetype']
filename = '{}_{}{}'.format(team.preferred_name, f['title'], filetype)
for fileout in fileout_iter(os.path.join(download_location, filename)):
if os.path.isfile(fileout):
continue
w.hook_process_hashtable(
"url:" + f['url_private'],
{
'file_out': fileout,
'httpheader': 'Authorization: Bearer ' + team.token
},
config.slack_timeout, "", "")
break
def subprocess_thread_message(message_json, eventrouter, team, channel, history_message):
parent_ts = message_json.get('thread_ts')
if parent_ts:
parent_message = channel.messages.get(SlackTS(parent_ts))
if parent_message:
message = SlackThreadMessage(
parent_message, message_json, team, channel)
parent_message.submessages.append(message)
channel.hash_message(parent_ts)
channel.store_message(message, team)
channel.change_message(parent_ts)
if parent_message.thread_channel and parent_message.thread_channel.active:
parent_message.thread_channel.buffer_prnt(message.sender, parent_message.thread_channel.render(message), message.ts, tag_nick=message.sender_plain)
elif message.ts > channel.last_read and message.has_mention():
parent_message.notify_thread(action="mention", sender_id=message_json["user"])
if config.thread_messages_in_channel or message_json["subtype"] == "thread_broadcast":
thread_tag = "thread_broadcast" if message_json["subtype"] == "thread_broadcast" else "thread_message"
channel.buffer_prnt(
message.sender,
channel.render(message),
message.ts,
tag_nick=message.sender_plain,
history_message=history_message,
extra_tags=[thread_tag],
)
subprocess_thread_broadcast = subprocess_thread_message
def subprocess_channel_join(message_json, eventrouter, team, channel, history_message):
prefix_join = w.prefix("join").strip()
message = SlackMessage(message_json, team, channel, override_sender=prefix_join)
channel.buffer_prnt(prefix_join, channel.render(message), message_json["ts"], tagset='join', tag_nick=message.get_sender()[1], history_message=history_message)
channel.user_joined(message_json['user'])
channel.store_message(message, team)
def subprocess_channel_leave(message_json, eventrouter, team, channel, history_message):
prefix_leave = w.prefix("quit").strip()
message = SlackMessage(message_json, team, channel, override_sender=prefix_leave)
channel.buffer_prnt(prefix_leave, channel.render(message), message_json["ts"], tagset='leave', tag_nick=message.get_sender()[1], history_message=history_message)
channel.user_left(message_json['user'])
channel.store_message(message, team)
def subprocess_channel_topic(message_json, eventrouter, team, channel, history_message):
prefix_topic = w.prefix("network").strip()
message = SlackMessage(message_json, team, channel, override_sender=prefix_topic)
channel.buffer_prnt(prefix_topic, channel.render(message), message_json["ts"], tagset="topic", tag_nick=message.get_sender()[1], history_message=history_message)
channel.set_topic(message_json["topic"])
channel.store_message(message, team)
subprocess_group_join = subprocess_channel_join
subprocess_group_leave = subprocess_channel_leave
subprocess_group_topic = subprocess_channel_topic
def subprocess_message_replied(message_json, eventrouter, team, channel, history_message):
parent_ts = message_json["message"].get("thread_ts")
parent_message = channel.messages.get(SlackTS(parent_ts))
# Thread exists but is not open yet
if parent_message is not None \
and not (parent_message.thread_channel and parent_message.thread_channel.active):
channel.hash_message(parent_ts)
last_message = max(message_json["message"]["replies"], key=lambda x: x["ts"])
if message_json["message"].get("user") == team.myidentifier:
parent_message.notify_thread(action="response", sender_id=last_message["user"])
elif any(team.myidentifier == r["user"] for r in message_json["message"]["replies"]):
parent_message.notify_thread(action="participant", sender_id=last_message["user"])
def subprocess_message_changed(message_json, eventrouter, team, channel, history_message):
new_message = message_json.get("message")
channel.change_message(new_message["ts"], message_json=new_message)
def subprocess_message_deleted(message_json, eventrouter, team, channel, history_message):
message = colorize_string(config.color_deleted, '(deleted)')
channel.change_message(message_json["deleted_ts"], text=message)
def process_reply(message_json, eventrouter, team, channel, metadata):
reply_to = int(message_json["reply_to"])
original_message_json = team.ws_replies.pop(reply_to, None)
if original_message_json:
original_message_json.update(message_json)
channel = team.channels[original_message_json.get('channel')]
process_message(original_message_json, eventrouter, team=team, channel=channel, metadata={})
dbg("REPLY {}".format(message_json))
else:
dbg("Unexpected reply {}".format(message_json))
def process_channel_marked(message_json, eventrouter, team, channel, metadata):
ts = message_json.get("ts")
if ts:
channel.mark_read(ts=ts, force=True, update_remote=False)
else:
dbg("tried to mark something weird {}".format(message_json))
process_group_marked = process_channel_marked
process_im_marked = process_channel_marked
process_mpim_marked = process_channel_marked
def process_channel_joined(message_json, eventrouter, team, channel, metadata):
channel.update_from_message_json(message_json["channel"])
channel.open()
def process_channel_created(message_json, eventrouter, team, channel, metadata):
item = message_json["channel"]
item['is_member'] = False
channel = SlackChannel(eventrouter, team=team, **item)
team.channels[item["id"]] = channel
team.buffer_prnt('Channel created: {}'.format(channel.slack_name))
def process_channel_rename(message_json, eventrouter, team, channel, metadata):
channel.slack_name = message_json['channel']['name']
def process_im_created(message_json, eventrouter, team, channel, metadata):
item = message_json["channel"]
channel = SlackDMChannel(eventrouter, team=team, users=team.users, **item)
team.channels[item["id"]] = channel
team.buffer_prnt('IM channel created: {}'.format(channel.name))
def process_im_open(message_json, eventrouter, team, channel, metadata):
channel.check_should_open(True)
w.buffer_set(channel.channel_buffer, "hotlist", "2")
def process_im_close(message_json, eventrouter, team, channel, metadata):
if channel.channel_buffer:
w.prnt(team.channel_buffer,
'IM {} closed by another client or the server'.format(channel.name))
eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, False, True)
def process_group_joined(message_json, eventrouter, team, channel, metadata):
item = message_json["channel"]
if item["name"].startswith("mpdm-"):
channel = SlackMPDMChannel(eventrouter, team.users, team.myidentifier, team=team, **item)
else:
channel = SlackGroupChannel(eventrouter, team=team, **item)
team.channels[item["id"]] = channel
channel.open()
def process_reaction_added(message_json, eventrouter, team, channel, metadata):
channel = team.channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts)
if message:
message.add_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("reaction to item type not supported: " + str(message_json))
def process_reaction_removed(message_json, eventrouter, team, channel, metadata):
channel = team.channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts)
if message:
message.remove_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("Reaction to item type not supported: " + str(message_json))
def process_subteam_created(subteam_json, eventrouter, team, channel, metadata):
subteam_json_info = subteam_json['subteam']
is_member = team.myidentifier in subteam_json_info.get('users', [])
subteam = SlackSubteam(team.identifier, is_member=is_member, **subteam_json_info)
team.subteams[subteam_json_info['id']] = subteam
def process_subteam_updated(subteam_json, eventrouter, team, channel, metadata):
current_subteam_info = team.subteams[subteam_json['subteam']['id']]
is_member = team.myidentifier in subteam_json['subteam'].get('users', [])
new_subteam_info = SlackSubteam(team.identifier, is_member=is_member, **subteam_json['subteam'])
team.subteams[subteam_json['subteam']['id']] = new_subteam_info
if current_subteam_info.is_member != new_subteam_info.is_member:
for channel in team.channels.values():
channel.set_highlights()
if config.notify_usergroup_handle_updated and current_subteam_info.handle != new_subteam_info.handle:
message = 'User group {old_handle} has updated its handle to {new_handle} in team {team}.'.format(
            old_handle=current_subteam_info.handle, new_handle=new_subteam_info.handle, team=team.preferred_name)
team.buffer_prnt(message, message=True)
def process_emoji_changed(message_json, eventrouter, team, channel, metadata):
team.load_emoji_completions()
###### New module/global methods
def render_formatting(text):
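    # Wraps Slack-style *bold* and _italic_ runs in weechat color codes while
    # keeping the markers, e.g. (color codes elided):
    #   render_formatting('a *bold* word')  # -> 'a *bold* word', with "*bold*" colored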
text = re.sub(r'(^| )\*([^*\n`]+)\*(?=[^\w]|$)',
r'\1{}*\2*{}'.format(w.color(config.render_bold_as),
w.color('-' + config.render_bold_as)),
text,
flags=re.UNICODE)
text = re.sub(r'(^| )_([^_\n`]+)_(?=[^\w]|$)',
r'\1{}_\2_{}'.format(w.color(config.render_italic_as),
w.color('-' + config.render_italic_as)),
text,
flags=re.UNICODE)
return text
def linkify_text(message, team, only_users=False):
# The get_username_map function is a bit heavy, but this whole
# function is only called on message send..
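    # Illustrative transformation (user/channel ids hypothetical):
    #   linkify_text('I <3 #general & @alice', team)
    #   -> 'I &lt;3 <#C012AB3CD|general> &amp; <@U023BE7LH>'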
usernames = team.get_username_map()
channels = team.get_channel_map()
usergroups = team.generate_usergroup_map()
message_escaped = (message
# Replace IRC formatting chars with Slack formatting chars.
.replace('\x02', '*')
.replace('\x1D', '_')
.replace('\x1F', config.map_underline_to)
# Escape chars that have special meaning to Slack. Note that we do not
# (and should not) perform full HTML entity-encoding here.
# See https://api.slack.com/docs/message-formatting for details.
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;'))
def linkify_word(match):
word = match.group(0)
prefix, name = match.groups()
if prefix == "@":
if name in ["channel", "everyone", "group", "here"]:
return "<!{}>".format(name)
elif name in usernames:
return "<@{}>".format(usernames[name])
            elif word in usergroups:
return "<!subteam^{}|{}>".format(usergroups[word], word)
elif prefix == "#" and not only_users:
if word in channels:
return "<#{}|{}>".format(channels[word], name)
return word
linkify_regex = r'(?:^|(?<=\s))([@#])([\w\(\)\'.-]+)'
return re.sub(linkify_regex, linkify_word, message_escaped, flags=re.UNICODE)
def unfurl_blocks(message_json):
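    # Minimal illustration: one section block with plain text
    #   unfurl_blocks({"blocks": [{"type": "section",
    #                              "text": {"type": "plain_text", "text": "hi"}}]})
    #   -> "\nhi" (the initial empty element yields a leading newline)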
block_text = [""]
for block in message_json["blocks"]:
try:
if block["type"] == "section":
fields = block.get("fields", [])
if "text" in block:
fields.insert(0, block["text"])
block_text.extend(unfurl_block_element(field) for field in fields)
elif block["type"] == "actions":
elements = []
for element in block["elements"]:
if element["type"] == "button":
elements.append(unfurl_block_element(element["text"]))
else:
elements.append(colorize_string(config.color_deleted,
'<<Unsupported block action type "{}">>'.format(element["type"])))
block_text.append(" | ".join(elements))
elif block["type"] == "call":
block_text.append("Join via " + block["call"]["v1"]["join_url"])
elif block["type"] == "divider":
block_text.append("---")
elif block["type"] == "context":
block_text.append(" | ".join(unfurl_block_element(el) for el in block["elements"]))
elif block["type"] == "image":
if "title" in block:
block_text.append(unfurl_block_element(block["title"]))
block_text.append(unfurl_block_element(block))
elif block["type"] == "rich_text":
continue
else:
block_text.append(colorize_string(config.color_deleted,
'<<Unsupported block type "{}">>'.format(block["type"])))
dbg('Unsupported block: "{}"'.format(json.dumps(block)), level=4)
except Exception as e:
dbg("Failed to unfurl block ({}): {}".format(repr(e), json.dumps(block)), level=4)
return "\n".join(block_text)
def unfurl_block_element(text):
if text["type"] == "mrkdwn":
return render_formatting(text["text"])
elif text["type"] == "plain_text":
return text["text"]
elif text["type"] == "image":
return "{} ({})".format(text["image_url"], text["alt_text"])
def unfurl_refs(text):
"""
input : <@U096Q7CQM|someuser> has joined the channel
    output : someuser has joined the channel
"""
# Find all strings enclosed by <>
# - <https://example.com|example with spaces>
# - <#C2147483705|#otherchannel>
# - <@U2147483697|@othernick>
# - <!subteam^U2147483697|@group>
    # Test patterns live in ./_pytest/test_unfurl.py
def unfurl_ref(match):
ref, fallback = match.groups()
resolved_ref = resolve_ref(ref)
if resolved_ref != ref:
return resolved_ref
if fallback and not config.unfurl_ignore_alt_text:
if ref.startswith("#"):
return "#{}".format(fallback)
elif ref.startswith("@"):
return fallback
elif ref.startswith("!subteam"):
prefix = "@" if not fallback.startswith("@") else ""
return prefix + fallback
elif ref.startswith("!date"):
return fallback
else:
match_url = r"^\w+:(//)?{}$".format(re.escape(fallback))
url_matches_desc = re.match(match_url, ref)
if url_matches_desc and config.unfurl_auto_link_display == "text":
return fallback
elif url_matches_desc and config.unfurl_auto_link_display == "url":
return ref
else:
return "{} ({})".format(ref, fallback)
return ref
return re.sub(r"<([^|>]*)(?:\|([^>]*))?>", unfurl_ref, text)
def unhtmlescape(text):
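    # Reverses the Slack escaping applied in linkify_text, e.g.:
    #   unhtmlescape('&lt;a &amp; b&gt;')  # -> '<a & b>'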
return text.replace("<", "<") \
.replace(">", ">") \
.replace("&", "&")
def unwrap_attachments(message_json, text_before):
text_before_unescaped = unhtmlescape(text_before)
attachment_texts = []
a = message_json.get("attachments")
if a:
if text_before:
attachment_texts.append('')
for attachment in a:
# Attachments should be rendered roughly like:
#
# $pretext
# $author: (if rest of line is non-empty) $title ($title_link) OR $from_url
# $author: (if no $author on previous line) $text
# $fields
t = []
prepend_title_text = ''
if 'author_name' in attachment:
prepend_title_text = attachment['author_name'] + ": "
if 'pretext' in attachment:
t.append(attachment['pretext'])
title = attachment.get('title')
title_link = attachment.get('title_link', '')
if title_link in text_before_unescaped:
title_link = ''
if title and title_link:
t.append('%s%s (%s)' % (prepend_title_text, title, title_link,))
prepend_title_text = ''
elif title and not title_link:
t.append('%s%s' % (prepend_title_text, title,))
prepend_title_text = ''
from_url = attachment.get('from_url', '')
if from_url not in text_before_unescaped and from_url != title_link:
t.append(from_url)
atext = attachment.get("text")
if atext:
tx = re.sub(r' *\n[\n ]+', '\n', atext)
t.append(prepend_title_text + tx)
prepend_title_text = ''
image_url = attachment.get('image_url', '')
if image_url not in text_before_unescaped and image_url != title_link:
t.append(image_url)
fields = attachment.get("fields")
if fields:
for f in fields:
if f.get('title'):
t.append('%s %s' % (f['title'], f['value'],))
else:
t.append(f['value'])
fallback = attachment.get("fallback")
if t == [] and fallback:
t.append(fallback)
attachment_texts.append("\n".join([x.strip() for x in t if x]))
return "\n".join(attachment_texts)
def unwrap_files(message_json, text_before):
files_texts = []
for f in message_json.get('files', []):
if f.get('mode', '') != 'tombstone':
text = '{} ({})'.format(f['url_private'], f['title'])
else:
text = colorize_string(config.color_deleted, '(This file was deleted.)')
files_texts.append(text)
if text_before:
files_texts.insert(0, '')
return "\n".join(files_texts)
def resolve_ref(ref):
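    # Illustrative resolutions (ids hypothetical, assuming they resolve):
    #   '@U023BE7LH'         -> '@alice'
    #   '#C012AB3CD'         -> 'general'
    #   '!subteam^S0614TZR7' -> the subteam's handle, e.g. '@ops-team'
    #   '!date^1392734382^{date_short} at {time}' -> 'Feb 18, 2014 at 14:39'
    #     (rendered in the local timezone)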
if ref in ['!channel', '!everyone', '!group', '!here']:
return ref.replace('!', '@')
for team in EVENTROUTER.teams.values():
if ref.startswith('@'):
user = team.users.get(ref[1:])
if user:
suffix = config.external_user_suffix if user.is_external else ''
return '@{}{}'.format(user.name, suffix)
elif ref.startswith('#'):
channel = team.channels.get(ref[1:])
if channel:
return channel.name
elif ref.startswith('!subteam'):
_, subteam_id = ref.split('^')
subteam = team.subteams.get(subteam_id)
if subteam:
return subteam.handle
elif ref.startswith("!date"):
parts = ref.split('^')
ref_datetime = datetime.fromtimestamp(int(parts[1]))
link_suffix = ' ({})'.format(parts[3]) if len(parts) > 3 else ''
token_to_format = {
'date_num': '%Y-%m-%d',
'date': '%B %d, %Y',
'date_short': '%b %d, %Y',
'date_long': '%A, %B %d, %Y',
'time': '%H:%M',
'time_secs': '%H:%M:%S'
}
def replace_token(match):
token = match.group(1)
if token.startswith('date_') and token.endswith('_pretty'):
if ref_datetime.date() == date.today():
return 'today'
elif ref_datetime.date() == date.today() - timedelta(days=1):
return 'yesterday'
elif ref_datetime.date() == date.today() + timedelta(days=1):
return 'tomorrow'
else:
token = token.replace('_pretty', '')
if token in token_to_format:
return ref_datetime.strftime(token_to_format[token])
else:
return match.group(0)
return re.sub(r"{([^}]+)}", replace_token, parts[2]) + link_suffix
# Something else, just return as-is
return ref
def create_user_status_string(profile):
real_name = profile.get("real_name")
status_emoji = replace_string_with_emoji(profile.get("status_emoji", ""))
status_text = profile.get("status_text")
if status_emoji or status_text:
return "{} | {} {}".format(real_name, status_emoji, status_text)
else:
return real_name
def create_reaction_string(reaction, myidentifier):
if config.show_reaction_nicks:
nicks = [resolve_ref('@{}'.format(user)) for user in reaction['users']]
users = '({})'.format(','.join(nicks))
else:
users = len(reaction['users'])
reaction_string = ':{}:{}'.format(reaction['name'], users)
if myidentifier in reaction['users']:
return colorize_string(config.color_reaction_suffix_added_by_you, reaction_string,
reset_color=config.color_reaction_suffix)
else:
return reaction_string
def create_reactions_string(reactions, myidentifier):
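    # e.g. with show_reaction_nicks disabled (color codes elided):
    #   create_reactions_string([{'name': 'thumbsup', 'users': ['U1', 'U2']}], 'U3')
    #   -> ' [:thumbsup:2]'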
reactions_with_users = [r for r in reactions if len(r['users']) > 0]
reactions_string = ' '.join(create_reaction_string(r, myidentifier) for r in reactions_with_users)
if reactions_string:
return ' ' + colorize_string(config.color_reaction_suffix, '[{}]'.format(reactions_string))
else:
return ''
def hdata_line_ts(line_pointer):
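    # Returns (major, minor): weechat's printed date for the line plus the
    # per-message counter stashed in date_printed (see modify_last_print_time).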
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
ts_major = w.hdata_time(hdata.line_data, data, 'date')
ts_minor = w.hdata_time(hdata.line_data, data, 'date_printed')
return (ts_major, ts_minor)
def modify_buffer_line(buffer_pointer, ts, new_text):
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
# Find the last line with this ts
while line_pointer and hdata_line_ts(line_pointer) != (ts.major, ts.minor):
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
# Find all lines for the message
pointers = []
while line_pointer and hdata_line_ts(line_pointer) == (ts.major, ts.minor):
pointers.append(line_pointer)
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
pointers.reverse()
# Split the message into at most the number of existing lines as we can't insert new lines
lines = new_text.split('\n', len(pointers) - 1)
# Replace newlines to prevent garbled lines in bare display mode
lines = [line.replace('\n', ' | ') for line in lines]
# Extend lines in case the new message is shorter than the old as we can't delete lines
lines += [''] * (len(pointers) - len(lines))
for pointer, line in zip(pointers, lines):
data = w.hdata_pointer(hdata.line, pointer, 'data')
w.hdata_update(hdata.line_data, data, {"message": line})
return w.WEECHAT_RC_OK
def modify_last_print_time(buffer_pointer, ts_minor):
"""
    This overloads the date_printed field to let us store the Slack
    per-message unique id that comes after the "." in a Slack ts.
"""
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
while line_pointer:
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
w.hdata_update(hdata.line_data, data, {"date_printed": str(ts_minor)})
if w.hdata_string(hdata.line_data, data, 'prefix'):
# Reached the first line of the message, so stop here
break
# Move one line backwards so all lines of the message are set
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
return w.WEECHAT_RC_OK
def nick_from_profile(profile, username):
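    # e.g. profile = {'real_name': 'Jane Doe', 'display_name': 'jane'} gives
    # 'jane' normally, or 'JaneDoe' if use_full_names is set (spaces are
    # stripped so the nick is usable in weechat).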
full_name = profile.get('real_name') or username
if config.use_full_names:
nick = full_name
else:
nick = profile.get('display_name') or full_name
return nick.replace(' ', '')
def format_nick(nick, previous_nick=None):
if nick == previous_nick:
nick = w.config_string(w.config_get('weechat.look.prefix_same_nick')) or nick
nick_prefix = w.config_string(w.config_get('weechat.look.nick_prefix'))
nick_prefix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_prefix'))
nick_suffix = w.config_string(w.config_get('weechat.look.nick_suffix'))
    nick_suffix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_suffix'))
return colorize_string(nick_prefix_color_name, nick_prefix) + nick + colorize_string(nick_suffix_color_name, nick_suffix)
def tag(tagset=None, user=None, self_msg=False, backlog=False, no_log=False, extra_tags=None):
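    # Illustrative result (set order varies):
    #   tag('dm', user='alice', self_msg=True)
    #   -> 'nick_alice,slack_dm,irc_privmsg,log1,self_msg,notify_none,no_highlight'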
tagsets = {
"team_info": {"no_highlight", "log3"},
"team_message": {"irc_privmsg", "notify_message", "log1"},
"dm": {"irc_privmsg", "notify_private", "log1"},
"join": {"irc_join", "no_highlight", "log4"},
"leave": {"irc_part", "no_highlight", "log4"},
"topic": {"irc_topic", "no_highlight", "log3"},
"channel": {"irc_privmsg", "notify_message", "log1"},
}
nick_tag = {"nick_{}".format(user).replace(" ", "_")} if user else set()
slack_tag = {"slack_{}".format(tagset or "default")}
tags = nick_tag | slack_tag | tagsets.get(tagset, set())
if self_msg or backlog:
tags -= {"notify_highlight", "notify_message", "notify_private"}
tags |= {"notify_none", "no_highlight"}
if self_msg:
tags |= {"self_msg"}
if backlog:
tags |= {"logger_backlog"}
if no_log:
tags |= {"no_log"}
tags = {tag for tag in tags if not tag.startswith("log") or tag == "logger_backlog"}
if extra_tags:
tags |= set(extra_tags)
return ",".join(tags)
def set_own_presence_active(team):
slackbot = team.get_channel_map()['Slackbot']
channel = team.channels[slackbot]
request = {"type": "typing", "channel": channel.identifier}
channel.team.send_to_websocket(request, expect_reply=False)
###### New/converted command_ commands
@slack_buffer_or_ignore
@utf8_decode
def invite_command_cb(data, current_buffer, args):
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split()[1:]
if not split_args:
w.prnt('', 'Too few arguments for command "/invite" (help on command: /help invite)')
return w.WEECHAT_RC_OK_EAT
if split_args[-1].startswith("#") or split_args[-1].startswith(config.group_name_prefix):
nicks = split_args[:-1]
channel = team.channels.get(team.get_channel_map().get(split_args[-1]))
if not nicks or not channel:
w.prnt('', '{}: No such nick/channel'.format(split_args[-1]))
return w.WEECHAT_RC_OK_EAT
else:
nicks = split_args
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
all_users = team.get_username_map()
users = set()
for nick in nicks:
user = all_users.get(nick.lstrip('@'))
if not user:
w.prnt('', 'ERROR: Unknown user: {}'.format(nick))
return w.WEECHAT_RC_OK_EAT
users.add(user)
s = SlackRequest(team, "conversations.invite", {"channel": channel.identifier, "users": ",".join(users)},
channel=channel, metadata={"nicks": nicks})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def part_command_cb(data, current_buffer, args):
e = EVENTROUTER
args = args.split()
if len(args) > 1:
team = e.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
channel = "".join(args[1:])
if channel in cmap:
buffer_ptr = team.channels[cmap[channel]].channel_buffer
e.weechat_controller.unregister_buffer(buffer_ptr, update_remote=True, close_buffer=True)
else:
w.prnt(team.channel_buffer, "{}: No such channel".format(channel))
else:
e.weechat_controller.unregister_buffer(current_buffer, update_remote=True, close_buffer=True)
return w.WEECHAT_RC_OK_EAT
def parse_topic_command(command):
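    # e.g.:
    #   parse_topic_command('/topic #general new topic')  # -> ('#general', 'new topic')
    #   parse_topic_command('/topic -delete')             # -> (None, '')
    #   parse_topic_command('/topic')                     # -> (None, None)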
args = command.split()[1:]
channel_name = None
topic = None
if args:
if args[0].startswith('#'):
channel_name = args[0]
topic = args[1:]
else:
topic = args
if topic == []:
topic = None
if topic:
topic = ' '.join(topic)
if topic == '-delete':
topic = ''
return channel_name, topic
@slack_buffer_or_ignore
@utf8_decode
def topic_command_cb(data, current_buffer, command):
"""
Change the topic of a channel
/topic [<channel>] [<topic>|-delete]
"""
channel_name, topic = parse_topic_command(command)
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if channel_name:
channel = team.channels.get(team.get_channel_map().get(channel_name))
else:
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not channel:
w.prnt(team.channel_buffer, "{}: No such channel".format(channel_name))
return w.WEECHAT_RC_OK_EAT
if topic is None:
w.prnt(channel.channel_buffer,
'Topic for {} is "{}"'.format(channel.name, channel.render_topic()))
else:
s = SlackRequest(team, "conversations.setTopic",
{"channel": channel.identifier, "topic": linkify_text(topic, team)}, channel=channel)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def whois_command_cb(data, current_buffer, command):
"""
    Get the real name and profile information of a user.
/whois <nick>
"""
args = command.split()
if len(args) < 2:
w.prnt(current_buffer, "Not enough arguments")
return w.WEECHAT_RC_OK_EAT
user = args[1]
    if user.startswith('@'):
user = user[1:]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
u = team.users.get(team.get_username_map().get(user))
if u:
def print_profile(field):
value = u.profile.get(field)
if value:
team.buffer_prnt("[{}]: {}: {}".format(user, field, value))
team.buffer_prnt("[{}]: {}".format(user, u.real_name))
status_emoji = replace_string_with_emoji(u.profile.get("status_emoji", ""))
status_text = u.profile.get("status_text", "")
if status_emoji or status_text:
team.buffer_prnt("[{}]: {} {}".format(user, status_emoji, status_text))
team.buffer_prnt("[{}]: username: {}".format(user, u.username))
team.buffer_prnt("[{}]: id: {}".format(user, u.identifier))
print_profile('title')
print_profile('email')
print_profile('phone')
print_profile('skype')
else:
team.buffer_prnt("[{}]: No such user".format(user))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def me_command_cb(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
message = args.split(' ', 1)[1]
channel.send_message(message, subtype='me_message')
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def command_register(data, current_buffer, args):
"""
/slack register [code/token]
Register a Slack team in wee-slack. Call this without any arguments and
follow the instructions to register a new team. If you already have a token
for a team, you can call this with that token to add it.
"""
CLIENT_ID = "2468770254.51917335286"
CLIENT_SECRET = "dcb7fe380a000cba0cca3169a5fe8d70" # Not really a secret.
REDIRECT_URI = "https%3A%2F%2Fwee-slack.github.io%2Fwee-slack%2Foauth%23"
if not args:
message = textwrap.dedent("""
### Connecting to a Slack team with OAuth ###
1) Paste this link into a browser: https://slack.com/oauth/authorize?client_id={}&scope=client&redirect_uri={}
2) Select the team you wish to access from wee-slack in your browser. If you want to add multiple teams, you will have to repeat this whole process for each team.
3) Click "Authorize" in the browser.
If you get a message saying you are not authorized to install wee-slack, the team has restricted Slack app installation and you will have to request it from an admin. To do that, go to https://my.slack.com/apps/A1HSZ9V8E-wee-slack and click "Request to Install".
4) The web page will show a command in the form `/slack register <code>`. Run this command in weechat.
""").strip().format(CLIENT_ID, REDIRECT_URI)
w.prnt("", message)
return w.WEECHAT_RC_OK_EAT
elif args.startswith('xox'):
add_token(args)
return w.WEECHAT_RC_OK_EAT
uri = (
"https://slack.com/api/oauth.access?"
"client_id={}&client_secret={}&redirect_uri={}&code={}"
).format(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, args)
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
    w.hook_process_hashtable("url:{}".format(uri), params, config.slack_timeout, "register_callback", "")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def register_callback(data, command, return_code, out, err):
if return_code != 0:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got return code {}. Err: {}".format(return_code, err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
    if not out:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got 0 length answer. Err: {}".format(err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
d = json.loads(out)
if not d["ok"]:
w.prnt("",
"ERROR: Couldn't get Slack OAuth token: {}".format(d['error']))
return w.WEECHAT_RC_OK_EAT
add_token(d['access_token'], d['team_name'])
return w.WEECHAT_RC_OK_EAT
def add_token(token, team_name=None):
if config.is_default('slack_api_token'):
w.config_set_plugin('slack_api_token', token)
else:
# Add new token to existing set, joined by comma.
existing_tokens = config.get_string('slack_api_token')
if token in existing_tokens:
print_error('This token is already registered')
return
w.config_set_plugin('slack_api_token', ','.join([existing_tokens, token]))
if team_name:
w.prnt("", "Success! Added team \"{}\"".format(team_name))
else:
w.prnt("", "Success! Added token")
w.prnt("", "Please reload wee-slack with: /python reload slack")
w.prnt("", "If you want to add another team you can repeat this process from step 1 before reloading wee-slack.")
@slack_buffer_or_ignore
@utf8_decode
def msg_command_cb(data, current_buffer, args):
aargs = args.split(None, 2)
who = aargs[1].lstrip('@')
if who == "*":
who = EVENTROUTER.weechat_controller.buffers[current_buffer].name
else:
join_query_command_cb(data, current_buffer, '/query ' + who)
if len(aargs) > 2:
message = aargs[2]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
if who in cmap:
channel = team.channels[cmap[who]]
channel.send_message(message)
return w.WEECHAT_RC_OK_EAT
def print_team_items_info(team, header, items, extra_info_function):
team.buffer_prnt("{}:".format(header))
if items:
max_name_length = max(len(item.name) for item in items)
for item in sorted(items, key=lambda item: item.name.lower()):
extra_info = extra_info_function(item)
team.buffer_prnt(" {:<{}}({})".format(item.name, max_name_length + 2, extra_info))
return w.WEECHAT_RC_OK_EAT
def print_users_info(team, header, users):
def extra_info_function(user):
external_text = ", external" if user.is_external else ""
return user.presence + external_text
return print_team_items_info(team, header, users, extra_info_function)
@slack_buffer_required
@utf8_decode
def command_teams(data, current_buffer, args):
"""
/slack teams
List the connected Slack teams.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
teams = EVENTROUTER.teams.values()
extra_info_function = lambda team: "token: {}".format(token_for_print(team.token))
return print_team_items_info(team, "Slack teams", teams, extra_info_function)
@slack_buffer_required
@utf8_decode
def command_channels(data, current_buffer, args):
"""
/slack channels
List the channels in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
channels = [channel for channel in team.channels.values() if channel.type not in ['im', 'mpim']]
def extra_info_function(channel):
if channel.active:
return "member"
elif getattr(channel, "is_archived", None):
return "archived"
else:
return "not a member"
return print_team_items_info(team, "Channels", channels, extra_info_function)
@slack_buffer_required
@utf8_decode
def command_users(data, current_buffer, args):
"""
/slack users
List the users in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
return print_users_info(team, "Users", team.users.values())
@slack_buffer_required
@utf8_decode
def command_usergroups(data, current_buffer, args):
"""
/slack usergroups [handle]
    List the usergroups in the current team.
    If a handle is given, show the members of that usergroup.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
usergroups = team.generate_usergroup_map()
usergroup_key = usergroups.get(args)
if usergroup_key:
s = SlackRequest(team, "usergroups.users.list", {"usergroup": usergroup_key},
metadata={'usergroup_handle': args})
EVENTROUTER.receive(s)
elif args:
w.prnt('', 'ERROR: Unknown usergroup handle: {}'.format(args))
return w.WEECHAT_RC_ERROR
else:
def extra_info_function(subteam):
is_member = 'member' if subteam.is_member else 'not a member'
return '{}, {}'.format(subteam.handle, is_member)
return print_team_items_info(team, "Usergroups", team.subteams.values(), extra_info_function)
return w.WEECHAT_RC_OK_EAT
command_usergroups.completion = '%(usergroups)'
@slack_buffer_required
@utf8_decode
def command_talk(data, current_buffer, args):
"""
/slack talk <user>[,<user2>[,<user3>...]]
Open a chat with the specified user(s).
"""
if not args:
w.prnt('', 'Usage: /slack talk <user>[,<user2>[,<user3>...]]')
return w.WEECHAT_RC_ERROR
return join_query_command_cb(data, current_buffer, '/query ' + args)
command_talk.completion = '%(nicks)'
@slack_buffer_or_ignore
@utf8_decode
def join_query_command_cb(data, current_buffer, args):
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(' ', 1)
if len(split_args) < 2 or not split_args[1]:
w.prnt('', 'Too few arguments for command "{}" (help on command: /help {})'
.format(split_args[0], split_args[0].lstrip('/')))
return w.WEECHAT_RC_OK_EAT
query = split_args[1]
# Try finding the channel by name
channel = team.channels.get(team.get_channel_map().get(query))
# If the channel doesn't exist, try finding a DM or MPDM instead
if not channel:
if query.startswith('#'):
w.prnt('', 'ERROR: Unknown channel: {}'.format(query))
return w.WEECHAT_RC_OK_EAT
# Get the IDs of the users
all_users = team.get_username_map()
users = set()
for username in query.split(','):
user = all_users.get(username.lstrip('@'))
if not user:
w.prnt('', 'ERROR: Unknown user: {}'.format(username))
return w.WEECHAT_RC_OK_EAT
users.add(user)
if users:
if len(users) > 1:
channel_type = 'mpim'
# Add the current user since MPDMs include them as a member
users.add(team.myidentifier)
else:
channel_type = 'im'
channel = team.find_channel_by_members(users, channel_type=channel_type)
# If the DM or MPDM doesn't exist, create it
if not channel:
s = SlackRequest(team, SLACK_API_TRANSLATOR[channel_type]['join'],
{'users': ','.join(users)})
EVENTROUTER.receive(s)
if channel:
channel.open()
if config.switch_buffer_on_join:
w.buffer_set(channel.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_showmuted(data, current_buffer, args):
"""
/slack showmuted
List the muted channels in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
muted_channels = [team.channels[key].name
for key in team.muted_channels if key in team.channels]
team.buffer_prnt("Muted channels: {}".format(', '.join(muted_channels)))
return w.WEECHAT_RC_OK_EAT
def get_msg_from_id(channel, msg_id):
if msg_id[0] == '$':
msg_id = msg_id[1:]
ts = channel.hashed_messages.get(msg_id)
return channel.messages.get(ts)
@slack_buffer_required
@utf8_decode
def command_thread(data, current_buffer, args):
"""
/thread [message_id]
Open the thread for the message.
    If no message id is specified, the last thread in the channel will be opened.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not isinstance(channel, SlackChannelCommon):
print_error('/thread can not be used in the team buffer, only in a channel')
return w.WEECHAT_RC_ERROR
if args:
msg = get_msg_from_id(channel, args)
if not msg:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
else:
for message in reversed(channel.messages.values()):
if type(message) == SlackMessage and message.number_of_replies():
msg = message
break
else:
w.prnt('', 'ERROR: No threads found in channel')
return w.WEECHAT_RC_OK_EAT
msg.open_thread(switch=config.switch_buffer_on_join)
return w.WEECHAT_RC_OK_EAT
command_thread.completion = '%(threads)'
@slack_buffer_required
@utf8_decode
def command_reply(data, current_buffer, args):
"""
/reply [-alsochannel] [<count/message_id>] <message>
When in a channel buffer:
/reply [-alsochannel] <count/message_id> <message>
    Reply in a thread on the message. Specify either the message id or a
    count upwards from the last message.
    When in a thread buffer:
    /reply [-alsochannel] <message>
    Reply to the current thread.
In either case, -alsochannel also sends the reply to the parent channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
parts = args.split(None, 1)
if parts[0] == "-alsochannel":
args = parts[1]
broadcast = True
else:
broadcast = False
if isinstance(channel, SlackThreadChannel):
text = args
msg = channel.parent_message
else:
try:
msg_id, text = args.split(None, 1)
except ValueError:
w.prnt('', 'Usage (when in a channel buffer): /reply [-alsochannel] <count/message_id> <message>')
return w.WEECHAT_RC_OK_EAT
msg = get_msg_from_id(channel, msg_id)
if msg:
if isinstance(msg, SlackThreadMessage):
parent_id = str(msg.parent_message.ts)
else:
parent_id = str(msg.ts)
elif msg_id.isdigit() and int(msg_id) >= 1:
mkeys = channel.main_message_keys_reversed()
parent_id = str(next(islice(mkeys, int(msg_id) - 1, None)))
else:
w.prnt('', 'ERROR: Invalid id given, must be a number greater than 0 or an existing id')
return w.WEECHAT_RC_OK_EAT
channel.send_message(text, request_dict_ext={'thread_ts': parent_id, 'reply_broadcast': broadcast})
return w.WEECHAT_RC_OK_EAT
command_reply.completion = '-alsochannel %(threads)||%(threads)'
@slack_buffer_required
@utf8_decode
def command_rehistory(data, current_buffer, args):
"""
/rehistory
Reload the history in the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
channel.clear_messages()
channel.get_history()
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_hide(data, current_buffer, args):
"""
/hide
Hide the current channel if it is marked as distracting.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
name = channel.formatted_name(style='long_default')
if name in config.distracting_channels:
w.buffer_set(channel.channel_buffer, "hidden", "1")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def slack_command_cb(data, current_buffer, args):
split_args = args.split(' ', 1)
cmd_name = split_args[0]
cmd_args = split_args[1] if len(split_args) > 1 else ''
cmd = EVENTROUTER.cmds.get(cmd_name or 'help')
if not cmd:
w.prnt('', 'Command not found: ' + cmd_name)
return w.WEECHAT_RC_OK
return cmd(data, current_buffer, cmd_args)
@utf8_decode
def command_help(data, current_buffer, args):
"""
/slack help [command]
Print help for /slack commands.
"""
if args:
cmd = EVENTROUTER.cmds.get(args)
if cmd:
cmds = {args: cmd}
else:
w.prnt('', 'Command not found: ' + args)
return w.WEECHAT_RC_OK
else:
cmds = EVENTROUTER.cmds
w.prnt('', '\n{}'.format(colorize_string('bold', 'Slack commands:')))
script_prefix = '{0}[{1}python{0}/{1}slack{0}]{1}'.format(w.color('green'), w.color('reset'))
for _, cmd in sorted(cmds.items()):
name, cmd_args, description = parse_help_docstring(cmd)
w.prnt('', '\n{} {} {}\n\n{}'.format(
script_prefix, colorize_string('white', name), cmd_args, description))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_distracting(data, current_buffer, args):
"""
/slack distracting
Add or remove the current channel from distracting channels. You can hide
or unhide these channels with /slack nodistractions.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
fullname = channel.formatted_name(style="long_default")
if fullname in config.distracting_channels:
config.distracting_channels.remove(fullname)
else:
config.distracting_channels.append(fullname)
w.config_set_plugin('distracting_channels', ','.join(config.distracting_channels))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_slash(data, current_buffer, args):
"""
/slack slash /customcommand arg1 arg2 arg3
    Run a custom Slack slash command.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
split_args = args.split(' ', 1)
command = split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
text_linkified = linkify_text(text, team, only_users=True)
s = SlackRequest(team, "chat.command",
{"command": command, "text": text_linkified, 'channel': channel.identifier},
channel=channel, metadata={'command': command, 'command_args': text})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_mute(data, current_buffer, args):
"""
/slack mute
Toggle mute on the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
team.muted_channels ^= {channel.identifier}
muted_str = "Muted" if channel.identifier in team.muted_channels else "Unmuted"
team.buffer_prnt("{} channel {}".format(muted_str, channel.name))
s = SlackRequest(team, "users.prefs.set",
{"name": "muted_channels", "value": ",".join(team.muted_channels)}, channel=channel)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_linkarchive(data, current_buffer, args):
"""
/slack linkarchive [message_id]
Place a link to the channel or message in the input bar.
Use cursor or mouse mode to get the id.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
url = 'https://{}/'.format(channel.team.domain)
if isinstance(channel, SlackChannelCommon):
url += 'archives/{}/'.format(channel.identifier)
if args:
if args[0] == '$':
message_id = args[1:]
else:
message_id = args
ts = channel.hashed_messages.get(message_id)
message = channel.messages.get(ts)
if message:
url += 'p{}{:0>6}'.format(message.ts.majorstr(), message.ts.minorstr())
if isinstance(message, SlackThreadMessage):
url += "?thread_ts={}&cid={}".format(message.parent_message.ts, channel.identifier)
else:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
w.command(current_buffer, "/input insert {}".format(url))
return w.WEECHAT_RC_OK_EAT
command_linkarchive.completion = '%(threads)'
@utf8_decode
def command_nodistractions(data, current_buffer, args):
"""
/slack nodistractions
Hide or unhide all channels marked as distracting.
"""
global hide_distractions
hide_distractions = not hide_distractions
channels = [channel for channel in EVENTROUTER.weechat_controller.buffers.values()
if channel in config.distracting_channels]
for channel in channels:
w.buffer_set(channel.channel_buffer, "hidden", str(int(hide_distractions)))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_upload(data, current_buffer, args):
"""
/slack upload <filename>
    Upload a file to the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
weechat_dir = w.info_get("weechat_dir", "")
file_path = os.path.join(weechat_dir, os.path.expanduser(args))
if channel.type == 'team':
w.prnt('', "ERROR: Can't upload a file to the team buffer")
return w.WEECHAT_RC_ERROR
if not os.path.isfile(file_path):
unescaped_file_path = file_path.replace(r'\ ', ' ')
if os.path.isfile(unescaped_file_path):
file_path = unescaped_file_path
else:
w.prnt('', 'ERROR: Could not find file: {}'.format(file_path))
return w.WEECHAT_RC_ERROR
post_data = {
'channels': channel.identifier,
}
if isinstance(channel, SlackThreadChannel):
post_data['thread_ts'] = channel.parent_message.ts
url = SlackRequest(channel.team, 'files.upload', post_data, channel=channel).request_string()
options = [
'-s',
'-Ffile=@{}'.format(file_path),
url
]
proxy_string = ProxyWrapper().curl()
if proxy_string:
options.append(proxy_string)
options_hashtable = {'arg{}'.format(i + 1): arg for i, arg in enumerate(options)}
w.hook_process_hashtable('curl', options_hashtable, config.slack_timeout, 'upload_callback', '')
return w.WEECHAT_RC_OK_EAT
command_upload.completion = '%(filename)'
@utf8_decode
def upload_callback(data, command, return_code, out, err):
if return_code != 0:
w.prnt("", "ERROR: Couldn't upload file. Got return code {}. Error: {}".format(return_code, err))
return w.WEECHAT_RC_OK_EAT
try:
response = json.loads(out)
except JSONDecodeError:
w.prnt("", "ERROR: Couldn't process response from file upload. Got: {}".format(out))
return w.WEECHAT_RC_OK_EAT
if not response["ok"]:
w.prnt("", "ERROR: Couldn't upload file. Error: {}".format(response["error"]))
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def away_command_cb(data, current_buffer, args):
all_servers, message = re.match('^/away( -all)? ?(.*)', args).groups()
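    # For illustration: with args == '/away -all brb' this yields
    # all_servers == ' -all' and message == 'brb'; with a bare '/away',
    # message is empty, so command_back is called below.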
if all_servers:
team_buffers = [team.channel_buffer for team in EVENTROUTER.teams.values()]
elif current_buffer in EVENTROUTER.weechat_controller.buffers:
team_buffers = [current_buffer]
else:
return w.WEECHAT_RC_OK
for team_buffer in team_buffers:
if message:
command_away(data, team_buffer, args)
else:
command_back(data, team_buffer, args)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_away(data, current_buffer, args):
"""
/slack away
Sets your status as 'away'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team, "users.setPresence", {"presence": "away"})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_status(data, current_buffer, args):
"""
/slack status [<emoji> [<status_message>]|-delete]
Lets you set your Slack Status (not to be confused with away/here).
Prints current status if no arguments are given, unsets the status if -delete is given.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(" ", 1)
if not split_args[0]:
profile = team.users[team.myidentifier].profile
team.buffer_prnt("Status: {} {}".format(
replace_string_with_emoji(profile.get("status_emoji", "")),
profile.get("status_text", "")))
return w.WEECHAT_RC_OK
emoji = "" if split_args[0] == "-delete" else split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
new_profile = {"status_text": text, "status_emoji": emoji}
s = SlackRequest(team, "users.profile.set", {"profile": new_profile})
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
command_status.completion = "-delete|%(emoji)"
@utf8_decode
def line_event_cb(data, signal, hashtable):
buffer_pointer = hashtable["_buffer"]
line_timestamp = hashtable["_chat_line_date"]
line_time_id = hashtable["_chat_line_date_printed"]
channel = EVENTROUTER.weechat_controller.buffers.get(buffer_pointer)
if line_timestamp and line_time_id and isinstance(channel, SlackChannelCommon):
ts = SlackTS("{}.{}".format(line_timestamp, line_time_id))
message_hash = channel.hash_message(ts)
if message_hash is None:
return w.WEECHAT_RC_OK
message_hash = "$" + message_hash
if data == "message":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert {}".format(message_hash))
elif data == "delete":
w.command(buffer_pointer, "/input send {}s///".format(message_hash))
elif data == "linkarchive":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/slack linkarchive {}".format(message_hash[1:]))
elif data == "reply":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert /reply {}\\x20".format(message_hash))
elif data == "thread":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/thread {}".format(message_hash))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_back(data, current_buffer, args):
"""
/slack back
Sets your status as 'back'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team, "users.setPresence", {"presence": "auto"})
EVENTROUTER.receive(s)
set_own_presence_active(team)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_label(data, current_buffer, args):
"""
/label <name>
Rename a thread buffer. Note that this is not permanent. It will only last
as long as you keep the buffer and wee-slack open.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if channel.type == 'thread':
new_name = " +" + args
channel.label = new_name
w.buffer_set(channel.channel_buffer, "short_name", new_name)
return w.WEECHAT_RC_OK
@utf8_decode
def set_unread_cb(data, current_buffer, command):
for channel in EVENTROUTER.weechat_controller.buffers.values():
channel.mark_read()
return w.WEECHAT_RC_OK
@slack_buffer_or_ignore
@utf8_decode
def set_unread_current_buffer_cb(data, current_buffer, command):
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
channel.mark_read()
return w.WEECHAT_RC_OK
###### NEW EXCEPTIONS
class InvalidType(Exception):
"""
    Raised by type checks when an object of the wrong type is used.
"""
def __init__(self, type_str):
super(InvalidType, self).__init__(type_str)
###### New but probably old; needs to be migrated
def closed_slack_debug_buffer_cb(data, buffer):
global slack_debug
slack_debug = None
return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
global slack_debug, debug_string
if slack_debug is None:
debug_string = None
slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
w.buffer_set(slack_debug, "notify", "0")
w.buffer_set(slack_debug, "highlight_tags_restrict", "highlight_force")
def load_emoji():
try:
DIR = w.info_get('weechat_dir', '')
with open('{}/weemoji.json'.format(DIR), 'r') as ef:
emojis = json.loads(ef.read())
if 'emoji' in emojis:
print_error('The weemoji.json file is in an old format. Please update it.')
else:
emoji_unicode = {key: value['unicode'] for key, value in emojis.items()}
emoji_skin_tones = {skin_tone['name']: skin_tone['unicode']
for emoji in emojis.values()
for skin_tone in emoji.get('skinVariations', {}).values()}
emoji_with_skin_tones = chain(emoji_unicode.items(), emoji_skin_tones.items())
emoji_with_skin_tones_reverse = {v: k for k, v in emoji_with_skin_tones}
return emoji_unicode, emoji_with_skin_tones_reverse
    except Exception:
dbg("Couldn't load emoji list: {}".format(format_exc_only()), 5)
return {}, {}
def parse_help_docstring(cmd):
doc = textwrap.dedent(cmd.__doc__).strip().split('\n', 1)
cmd_line = doc[0].split(None, 1)
args = ''.join(cmd_line[1:])
return cmd_line[0], args, doc[1].strip()
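# For example, command_label's docstring above starts with '/label <name>',
# so parse_help_docstring(command_label) returns
# ('/label', '<name>', 'Rename a thread buffer. ...').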
def setup_hooks():
w.bar_item_new('slack_typing_notice', '(extra)typing_bar_item_cb', '')
w.bar_item_new('away', '(extra)away_bar_item_cb', '')
w.bar_item_new('slack_away', '(extra)away_bar_item_cb', '')
w.hook_timer(5000, 0, 0, "ws_ping_cb", "")
w.hook_timer(1000, 0, 0, "typing_update_cb", "")
w.hook_timer(1000, 0, 0, "buffer_list_update_callback", "EVENTROUTER")
w.hook_timer(3000, 0, 0, "reconnect_callback", "EVENTROUTER")
w.hook_timer(1000 * 60 * 5, 0, 0, "slack_never_away_cb", "")
w.hook_signal('buffer_closing', "buffer_closing_callback", "")
w.hook_signal('buffer_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('window_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('quit', "quit_notification_callback", "")
if config.send_typing_notice:
w.hook_signal('input_text_changed', "typing_notification_cb", "")
command_help.completion = '|'.join(EVENTROUTER.cmds.keys())
completions = '||'.join(
'{} {}'.format(name, getattr(cmd, 'completion', ''))
for name, cmd in EVENTROUTER.cmds.items())
w.hook_command(
# Command name and description
'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
# Usage
'<command> [<command options>]',
# Description of arguments
'Commands:\n' +
'\n'.join(sorted(EVENTROUTER.cmds.keys())) +
'\nUse /slack help <command> to find out more\n',
# Completions
completions,
# Function name
'slack_command_cb', '')
w.hook_command_run('/me', 'me_command_cb', '')
w.hook_command_run('/query', 'join_query_command_cb', '')
w.hook_command_run('/join', 'join_query_command_cb', '')
w.hook_command_run('/part', 'part_command_cb', '')
w.hook_command_run('/topic', 'topic_command_cb', '')
w.hook_command_run('/msg', 'msg_command_cb', '')
w.hook_command_run('/invite', 'invite_command_cb', '')
w.hook_command_run("/input complete_next", "complete_next_cb", "")
w.hook_command_run("/input set_unread", "set_unread_cb", "")
w.hook_command_run("/input set_unread_current_buffer", "set_unread_current_buffer_cb", "")
w.hook_command_run('/away', 'away_command_cb', '')
w.hook_command_run('/whois', 'whois_command_cb', '')
for cmd_name in ['hide', 'label', 'rehistory', 'reply', 'thread']:
cmd = EVENTROUTER.cmds[cmd_name]
_, args, description = parse_help_docstring(cmd)
completion = getattr(cmd, 'completion', '')
w.hook_command(cmd_name, description, args, '', completion, 'command_' + cmd_name, '')
w.hook_completion("irc_channel_topic", "complete topic for slack", "topic_completion_cb", "")
w.hook_completion("irc_channels", "complete channels for slack", "channel_completion_cb", "")
w.hook_completion("irc_privates", "complete dms/mpdms for slack", "dm_completion_cb", "")
w.hook_completion("nicks", "complete @-nicks for slack", "nick_completion_cb", "")
w.hook_completion("threads", "complete thread ids for slack", "thread_completion_cb", "")
w.hook_completion("usergroups", "complete @-usergroups for slack", "usergroups_completion_cb", "")
w.hook_completion("emoji", "complete :emoji: for slack", "emoji_completion_cb", "")
w.key_bind("mouse", {
"@chat(python.*):button2": "hsignal:slack_mouse",
})
w.key_bind("cursor", {
"@chat(python.*):D": "hsignal:slack_cursor_delete",
"@chat(python.*):L": "hsignal:slack_cursor_linkarchive",
"@chat(python.*):M": "hsignal:slack_cursor_message",
"@chat(python.*):R": "hsignal:slack_cursor_reply",
"@chat(python.*):T": "hsignal:slack_cursor_thread",
})
w.hook_hsignal("slack_mouse", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_delete", "line_event_cb", "delete")
w.hook_hsignal("slack_cursor_linkarchive", "line_event_cb", "linkarchive")
w.hook_hsignal("slack_cursor_message", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_reply", "line_event_cb", "reply")
w.hook_hsignal("slack_cursor_thread", "line_event_cb", "thread")
# Hooks to fix/implement
# w.hook_signal('buffer_opened', "buffer_opened_cb", "")
# w.hook_signal('window_scrolled', "scrolled_cb", "")
# w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")
##### END NEW
def dbg(message, level=0, main_buffer=False, fout=False):
"""
    Send debug output to the slack-debug buffer and optionally write it to a file.
"""
# TODO: do this smarter
if level >= config.debug_level:
global debug_string
message = "DEBUG: {}".format(message)
if fout:
with open('/tmp/debug.log', 'a+') as log_file:
log_file.writelines(message + '\n')
if main_buffer:
w.prnt("", "slack: " + message)
else:
if slack_debug and (not debug_string or debug_string in message):
w.prnt(slack_debug, message)
###### Config code
class PluginConfig(object):
Setting = collections.namedtuple('Setting', ['default', 'desc'])
# Default settings.
# These are, initially, each a (default, desc) tuple; the former is the
# default value of the setting, in the (string) format that weechat
# expects, and the latter is the user-friendly description of the setting.
# At __init__ time these values are extracted, the description is used to
# set or update the setting description for use with /help, and the default
# value is used to set the default for any settings not already defined.
# Following this procedure, the keys remain the same, but the values are
# the real (python) values of the settings.
default_settings = {
'auto_open_threads': Setting(
default='false',
desc='Automatically open threads when mentioned or in'
                 ' response to own messages.'),
'background_load_all_history': Setting(
default='false',
desc='Load history for each channel in the background as soon as it'
' opens, rather than waiting for the user to look at it.'),
'channel_name_typing_indicator': Setting(
default='true',
desc='Change the prefix of a channel from # to > when someone is'
' typing in it. Note that this will (temporarily) affect the sort'
' order if you sort buffers by name rather than by number.'),
'color_buflist_muted_channels': Setting(
default='darkgray',
desc='Color to use for muted channels in the buflist'),
'color_deleted': Setting(
default='red',
desc='Color to use for deleted messages and files.'),
'color_edited_suffix': Setting(
default='095',
desc='Color to use for (edited) suffix on messages that have been edited.'),
'color_reaction_suffix': Setting(
default='darkgray',
desc='Color to use for the [:wave:(@user)] suffix on messages that'
' have reactions attached to them.'),
'color_reaction_suffix_added_by_you': Setting(
default='blue',
desc='Color to use for reactions that you have added.'),
'color_thread_suffix': Setting(
default='lightcyan',
desc='Color to use for the [thread: XXX] suffix on messages that'
' have threads attached to them. The special value "multiple" can'
' be used to use a different color for each thread.'),
'color_typing_notice': Setting(
default='yellow',
desc='Color to use for the typing notice.'),
'colorize_private_chats': Setting(
default='false',
desc='Whether to use nick-colors in DM windows.'),
'debug_mode': Setting(
default='false',
desc='Open a dedicated buffer for debug messages and start logging'
' to it. How verbose the logging is depends on log_level.'),
'debug_level': Setting(
default='3',
desc='Show only this level of debug info (or higher) when'
' debug_mode is on. Lower levels -> more messages.'),
'distracting_channels': Setting(
default='',
desc='List of channels to hide.'),
'external_user_suffix': Setting(
default='*',
desc='The suffix appended to nicks to indicate external users.'),
'files_download_location': Setting(
default='',
desc='If set, file attachments will be automatically downloaded'
' to this location. "%h" will be replaced by WeeChat home,'
' "~/.weechat" by default.'),
'group_name_prefix': Setting(
default='&',
desc='The prefix of buffer names for groups (private channels).'),
'map_underline_to': Setting(
default='_',
desc='When sending underlined text to slack, use this formatting'
' character for it. The default ("_") sends it as italics. Use'
' "*" to send bold instead.'),
'muted_channels_activity': Setting(
default='personal_highlights',
desc="Control which activity you see from muted channels, either"
" none, personal_highlights, all_highlights or all. none: Don't"
" show any activity. personal_highlights: Only show personal"
" highlights, i.e. not @channel and @here. all_highlights: Show"
" all highlights, but not other messages. all: Show all activity,"
" like other channels."),
'notify_usergroup_handle_updated': Setting(
default='false',
desc="Control if you want to see notification when a usergroup's"
" handle has changed, either true or false."),
'never_away': Setting(
default='false',
desc='Poke Slack every five minutes so that it never marks you "away".'),
'record_events': Setting(
default='false',
desc='Log all traffic from Slack to disk as JSON.'),
'render_bold_as': Setting(
default='bold',
desc='When receiving bold text from Slack, render it as this in weechat.'),
'render_emoji_as_string': Setting(
default='false',
desc="Render emojis as :emoji_name: instead of emoji characters. Enable this"
" if your terminal doesn't support emojis, or set to 'both' if you want to"
" see both renderings. Note that even though this is"
" disabled by default, you need to place {}/blob/master/weemoji.json in your"
" weechat directory to enable rendering emojis as emoji characters."
.format(REPO_URL)),
'render_italic_as': Setting(
default='italic',
            desc='When receiving italic text from Slack, render it as this in weechat.'
' If your terminal lacks italic support, consider using "underline" instead.'),
'send_typing_notice': Setting(
default='true',
desc='Alert Slack users when you are typing a message in the input bar '
'(Requires reload)'),
'server_aliases': Setting(
default='',
desc='A comma separated list of `subdomain:alias` pairs. The alias'
' will be used instead of the actual name of the slack (in buffer'
' names, logging, etc). E.g `work:no_fun_allowed` would make your'
' work slack show up as `no_fun_allowed` rather than `work.slack.com`.'),
'shared_name_prefix': Setting(
default='%',
desc='The prefix of buffer names for shared channels.'),
'short_buffer_names': Setting(
default='false',
desc='Use `foo.#channel` rather than `foo.slack.com.#channel` as the'
' internal name for Slack buffers.'),
'show_buflist_presence': Setting(
default='true',
desc='Display a `+` character in the buffer list for present users.'),
'show_reaction_nicks': Setting(
default='false',
desc='Display the name of the reacting user(s) alongside each reactji.'),
'slack_api_token': Setting(
default='INSERT VALID KEY HERE!',
desc='List of Slack API tokens, one per Slack instance you want to'
' connect to. See the README for details on how to get these.'),
'slack_timeout': Setting(
default='20000',
desc='How long (ms) to wait when communicating with Slack.'),
'switch_buffer_on_join': Setting(
default='true',
desc='When /joining a channel, automatically switch to it as well.'),
'thread_messages_in_channel': Setting(
default='false',
desc='When enabled shows thread messages in the parent channel.'),
'unfurl_ignore_alt_text': Setting(
default='false',
desc='When displaying ("unfurling") links to channels/users/etc,'
' ignore the "alt text" present in the message and instead use the'
' canonical name of the thing being linked to.'),
'unfurl_auto_link_display': Setting(
default='both',
desc='When displaying ("unfurling") links to channels/users/etc,'
' determine what is displayed when the text matches the url'
' without the protocol. This happens when Slack automatically'
' creates links, e.g. from words separated by dots or email'
' addresses. Set it to "text" to only display the text written by'
' the user, "url" to only display the url or "both" (the default)'
' to display both.'),
'unhide_buffers_with_activity': Setting(
default='false',
desc='When activity occurs on a buffer, unhide it even if it was'
' previously hidden (whether by the user or by the'
' distracting_channels setting).'),
'use_full_names': Setting(
default='false',
desc='Use full names as the nicks for all users. When this is'
' false (the default), display names will be used if set, with a'
' fallback to the full name if display name is not set.'),
}
# Set missing settings to their defaults. Load non-missing settings from
# weechat configs.
def __init__(self):
self.settings = {}
# Set all descriptions, replace the values in the dict with the
# default setting value rather than the (setting,desc) tuple.
for key, (default, desc) in self.default_settings.items():
w.config_set_desc_plugin(key, desc)
self.settings[key] = default
# Migrate settings from old versions of Weeslack...
self.migrate()
# ...and then set anything left over from the defaults.
for key, default in self.settings.items():
if not w.config_get_plugin(key):
w.config_set_plugin(key, default)
self.config_changed(None, None, None)
def __str__(self):
return "".join([x + "\t" + str(self.settings[x]) + "\n" for x in self.settings.keys()])
def config_changed(self, data, key, value):
for key in self.settings:
self.settings[key] = self.fetch_setting(key)
if self.debug_mode:
create_slack_debug_buffer()
return w.WEECHAT_RC_OK
def fetch_setting(self, key):
try:
return getattr(self, 'get_' + key)(key)
except AttributeError:
# Most settings are on/off, so make get_boolean the default
return self.get_boolean(key)
        except Exception:
            # There was a setting-specific getter, but it failed.
            return self.settings[key]
def __getattr__(self, key):
try:
return self.settings[key]
except KeyError:
raise AttributeError(key)
def get_boolean(self, key):
return w.config_string_to_boolean(w.config_get_plugin(key))
def get_string(self, key):
return w.config_get_plugin(key)
def get_int(self, key):
return int(w.config_get_plugin(key))
def is_default(self, key):
default = self.default_settings.get(key).default
return w.config_get_plugin(key) == default
get_color_buflist_muted_channels = get_string
get_color_deleted = get_string
get_color_edited_suffix = get_string
get_color_reaction_suffix = get_string
get_color_reaction_suffix_added_by_you = get_string
get_color_thread_suffix = get_string
get_color_typing_notice = get_string
get_debug_level = get_int
get_external_user_suffix = get_string
get_files_download_location = get_string
get_group_name_prefix = get_string
get_map_underline_to = get_string
get_muted_channels_activity = get_string
get_render_bold_as = get_string
get_render_italic_as = get_string
get_shared_name_prefix = get_string
get_slack_timeout = get_int
get_unfurl_auto_link_display = get_string
def get_distracting_channels(self, key):
return [x.strip() for x in w.config_get_plugin(key).split(',') if x]
def get_server_aliases(self, key):
alias_list = w.config_get_plugin(key)
return dict(item.split(":") for item in alias_list.split(",") if ':' in item)
def get_slack_api_token(self, key):
token = w.config_get_plugin("slack_api_token")
if token.startswith('${sec.data'):
return w.string_eval_expression(token, {}, {}, {})
else:
return token
def get_render_emoji_as_string(self, key):
s = w.config_get_plugin(key)
if s == 'both':
return s
return w.config_string_to_boolean(s)
def migrate(self):
"""
        Migrate settings from the old plugin name (slack_extension) to
        slack, and carry over renamed settings (thread_suffix_color).
"""
if not w.config_get_plugin("migrated"):
for k in self.settings.keys():
if not w.config_is_set_plugin(k):
p = w.config_get("plugins.var.python.slack_extension.{}".format(k))
data = w.config_string(p)
if data != "":
w.config_set_plugin(k, data)
w.config_set_plugin("migrated", "true")
old_thread_color_config = w.config_get_plugin("thread_suffix_color")
new_thread_color_config = w.config_get_plugin("color_thread_suffix")
if old_thread_color_config and not new_thread_color_config:
w.config_set_plugin("color_thread_suffix", old_thread_color_config)
def config_server_buffer_cb(data, key, value):
for team in EVENTROUTER.teams.values():
team.buffer_merge(value)
return w.WEECHAT_RC_OK
# To trace execution, call setup_trace() at startup and add
# sys.settrace(trace_calls) inside the function you want to trace.
def setup_trace():
global f
now = time.time()
f = open('{}/{}-trace.json'.format(RECORD_DIR, now), 'w')
def trace_calls(frame, event, arg):
global f
if event != 'call':
return
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print('Call to %s on line %s of %s from line %s of %s' % \
(func_name, func_line_no, func_filename,
caller_line_no, caller_filename), file=f)
f.flush()
return
def initiate_connection(token, retries=3, team=None):
return SlackRequest(team,
'rtm.{}'.format('connect' if team else 'start'),
{"batch_presence_aware": 1},
retries=retries,
token=token,
metadata={'initial_connection': True})
if __name__ == "__main__":
w = WeechatWrapper(weechat)
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "script_unloaded", ""):
weechat_version = w.info_get("version_number", "") or 0
if int(weechat_version) < 0x1030000:
w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))
else:
global EVENTROUTER
EVENTROUTER = EventRouter()
receive_httprequest_callback = EVENTROUTER.receive_httprequest_callback
receive_ws_callback = EVENTROUTER.receive_ws_callback
# Global var section
slack_debug = None
config = PluginConfig()
config_changed_cb = config.config_changed
typing_timer = time.time()
hide_distractions = False
w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
w.hook_config("irc.look.server_buffer", "config_server_buffer_cb", "")
w.hook_modifier("input_text_for_buffer", "input_text_for_buffer_cb", "")
EMOJI, EMOJI_WITH_SKIN_TONES_REVERSE = load_emoji()
setup_hooks()
# attach to the weechat hooks we need
tokens = [token.strip() for token in config.slack_api_token.split(',')]
w.prnt('', 'Connecting to {} slack team{}.'
.format(len(tokens), '' if len(tokens) == 1 else 's'))
for t in tokens:
s = initiate_connection(t)
EVENTROUTER.receive(s)
if config.record_events:
EVENTROUTER.record()
EVENTROUTER.handle_next()
# END attach to the weechat hooks we need
hdata = Hdata(w)
| gpl-3.0 | -5,768,731,387,078,710,000 | 37.79681 | 277 | 0.603911 | false |
partofthething/home-assistant | homeassistant/components/fortios/device_tracker.py | 24 | 2933 | """
Support for using a FortiOS device such as a FortiGate as a device tracker.
This component is part of the device_tracker platform.
"""
import logging
from fortiosapi import FortiOSAPI
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_TOKEN, CONF_VERIFY_SSL
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a FortiOSDeviceScanner."""
host = config[DOMAIN][CONF_HOST]
verify_ssl = config[DOMAIN][CONF_VERIFY_SSL]
token = config[DOMAIN][CONF_TOKEN]
fgt = FortiOSAPI()
try:
fgt.tokenlogin(host, token, verify_ssl)
except ConnectionError as ex:
_LOGGER.error("ConnectionError to FortiOS API: %s", ex)
return None
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Failed to login to FortiOS API: %s", ex)
return None
return FortiOSDeviceScanner(fgt)
class FortiOSDeviceScanner(DeviceScanner):
"""This class queries a FortiOS unit for connected devices."""
def __init__(self, fgt) -> None:
"""Initialize the scanner."""
self._clients = {}
self._clients_json = {}
self._fgt = fgt
def update(self):
"""Update clients from the device."""
clients_json = self._fgt.monitor("user/device/select", "")
self._clients_json = clients_json
self._clients = []
if clients_json:
for client in clients_json["results"]:
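                # consider a client present if seen in the last 180 seconds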
if client["last_seen"] < 180:
self._clients.append(client["mac"].upper())
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self.update()
return self._clients
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
_LOGGER.debug("Getting name of device %s", device)
device = device.lower()
data = self._clients_json
        if not data:
_LOGGER.error("No json results to get device names")
return None
for client in data["results"]:
if client["mac"] == device:
try:
name = client["host"]["name"]
_LOGGER.debug("Getting device name=%s", name)
return name
except KeyError as kex:
_LOGGER.error("Name not found in client data: %s", kex)
return None
return None
| mit | 5,338,194,131,105,834,000 | 28.33 | 78 | 0.609615 | false |
msarana/selenium_python | ENV/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py | 505 | 1421 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if node.nodeType == Node.DOCUMENT_TYPE_NODE:
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return _base.TEXT, node.nodeValue
elif node.nodeType == Node.ELEMENT_NODE:
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
if attr.namespaceURI:
attrs[(attr.namespaceURI, attr.localName)] = attr.value
else:
attrs[(None, attr.name)] = attr.value
return (_base.ELEMENT, node.namespaceURI, node.nodeName,
attrs, node.hasChildNodes())
elif node.nodeType == Node.COMMENT_NODE:
return _base.COMMENT, node.nodeValue
elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
return (_base.DOCUMENT,)
else:
return _base.UNKNOWN, node.nodeType
def getFirstChild(self, node):
return node.firstChild
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parentNode
| apache-2.0 | -9,084,194,263,731,290,000 | 32.046512 | 80 | 0.612245 | false |
elliott10/qemu-instru-tracer | scripts/qemu-gdb.py | 286 | 2813 | #!/usr/bin/python
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Contributions after 2012-01-13 are licensed under the terms of the
# GNU GPL, version 2 or (at your option) any later version.
import gdb
def isnull(ptr):
return ptr == gdb.Value(0).cast(ptr.type)
def int128(p):
return long(p['lo']) + (long(p['hi']) << 64)
class QemuCommand(gdb.Command):
'''Prefix for QEMU debug support commands'''
def __init__(self):
gdb.Command.__init__(self, 'qemu', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE, True)
class MtreeCommand(gdb.Command):
'''Display the memory tree hierarchy'''
def __init__(self):
gdb.Command.__init__(self, 'qemu mtree', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.queue = []
def invoke(self, arg, from_tty):
self.seen = set()
self.queue_root('address_space_memory')
self.queue_root('address_space_io')
self.process_queue()
def queue_root(self, varname):
ptr = gdb.parse_and_eval(varname)['root']
self.queue.append(ptr)
def process_queue(self):
while self.queue:
ptr = self.queue.pop(0)
if long(ptr) in self.seen:
continue
self.print_item(ptr)
def print_item(self, ptr, offset = gdb.Value(0), level = 0):
self.seen.add(long(ptr))
addr = ptr['addr']
addr += offset
size = int128(ptr['size'])
alias = ptr['alias']
klass = ''
if not isnull(alias):
klass = ' (alias)'
elif not isnull(ptr['ops']):
klass = ' (I/O)'
elif bool(ptr['ram']):
klass = ' (RAM)'
gdb.write('%s%016x-%016x %s%s (@ %s)\n'
% (' ' * level,
long(addr),
long(addr + (size - 1)),
ptr['name'].string(),
klass,
ptr,
),
gdb.STDOUT)
if not isnull(alias):
gdb.write('%s alias: %s@%016x (@ %s)\n' %
(' ' * level,
alias['name'].string(),
ptr['alias_offset'],
alias,
),
gdb.STDOUT)
self.queue.append(alias)
subregion = ptr['subregions']['tqh_first']
level += 1
while not isnull(subregion):
self.print_item(subregion, addr, level)
subregion = subregion['subregions_link']['tqe_next']
QemuCommand()
MtreeCommand()
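# Example session (illustrative): load this script into gdb and run the
# command registered above:
#   (gdb) source scripts/qemu-gdb.py
#   (gdb) qemu mtree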
| gpl-2.0 | 8,246,101,005,666,771,000 | 30.606742 | 71 | 0.503733 | false |
Ictp/indico | doc/api/source/event_api_docs.py | 2 | 2619 | import StringIO
import os, sys, re, types
from zope.interface import Interface, interface
import conf
PATH = '../../../indico/'
from MaKaC import common
from indico.core.extpoint import IListener, IContributor
def iterate_sources(dir, exclude=[]):
"""
    Iterate through all *.py files inside a dir, recursively.
"""
for dirname, dirnames, filenames in os.walk(dir):
for filename in filenames:
relDir = os.path.relpath(dirname, dir)
cont = False
for exc in exclude:
if relDir.startswith(exc):
cont = True
if cont:
continue
m = re.match(r'^(.*)\.py$', filename)
if m:
name = m.group(1)
rel = os.path.relpath(dirname, dir).split('/')
if rel == ['.']:
yield 'indico'
elif name == '__init__':
yield '.'.join(['indico'] + rel)
else:
yield '.'.join(['indico'] + rel + [name])
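# e.g. '<dir>/core/extpoint/__init__.py' yields 'indico.core.extpoint' and
# '<dir>/core/extpoint/events.py' yields 'indico.core.extpoint.events'
# (paths here are illustrative).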
def docsFor(mod, iface, content):
path = "%s.%s" % (mod, iface.__name__)
content.write(""".. autointerface:: %s\n""" % path)
def _rst_title(text, char='='):
return "%s\n%s\n%s\n" % (char * len(text), text, char * len(text))
def gatherInfo(mod, content):
first = True
for elem, val in mod.__dict__.iteritems():
if type(val) == interface.InterfaceClass:
if val.__module__ == mod.__name__ and \
(val.extends(IListener) or val.extends(IContributor)):
if first:
content.write(_rst_title(mod.__name__, char='-'))
content.write(""".. automodule:: %s\n""" % mod.__name__)
first = False
if val.extends(IListener):
docsFor(mod.__name__, val, content)
elif val.extends(IContributor):
docsFor(mod.__name__, val, content)
def main(fname):
"""
main function
"""
content = StringIO.StringIO()
content.write(_rst_title("Listener/Contributor API"))
for f in iterate_sources(PATH, exclude=["MaKaC/po"]):
try:
mod = __import__(f)
for pelem in f.split('.')[1:]:
mod = getattr(mod, pelem)
gatherInfo(mod, content)
except ImportError:
sys.stderr.write("Import of '%s' failed!\n" % f)
with open(fname, 'w') as fd:
fd.write(content.getvalue())
content.close()
if __name__ == '__main__':
main(sys.argv[1])
| gpl-3.0 | 5,479,062,163,109,221,000 | 26.28125 | 76 | 0.499809 | false |
qiyuangong/leetcode | python/404_Sum_of_Left_Leaves.py | 2 | 1068 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
# def sumOfLeftLeaves(self, root):
# """
# :type root: TreeNode
# :rtype: int
# """
# if root is None:
# return 0
# if root.left is not None:
# if root.left.left is None and root.left.right is None:
# return root.left.val + self.sumOfLeftLeaves(root.right)
# return self.sumOfLeftLeaves(root.left) + self.sumOfLeftLeaves(root.right)
def sumOfLeftLeaves(self, root):
stack = [root]
res = 0
while len(stack) > 0:
curr = stack.pop(0)
if curr is not None:
if curr.left is not None:
if curr.left.left is None and curr.left.right is None:
res += curr.left.val
stack.insert(0, curr.right)
stack.insert(0, curr.left)
return res
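

if __name__ == '__main__':
    # Minimal self-check (not part of the LeetCode harness): the tree
    #     3
    #    / \
    #   9  20
    #      / \
    #     15  7
    # has left leaves 9 and 15, so the expected sum is 24.
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    assert Solution().sumOfLeftLeaves(root) == 24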
| mit | -1,721,707,686,396,930,300 | 32.375 | 83 | 0.51779 | false |
taigaio/taiga-back | taiga/importers/asana/api.py | 1 | 5842 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from django.conf import settings
from taiga.base.api import viewsets
from taiga.base import response
from taiga.base import exceptions as exc
from taiga.base.decorators import list_route
from taiga.users.services import get_user_photo_url
from taiga.users.gravatar import get_user_gravatar_id
from taiga.importers import permissions, exceptions
from taiga.importers.services import resolve_users_bindings
from .importer import AsanaImporter
from . import tasks
class AsanaImporterViewSet(viewsets.ViewSet):
permission_classes = (permissions.ImporterPermission,)
@list_route(methods=["POST"])
def list_users(self, request, *args, **kwargs):
self.check_permissions(request, "list_users", None)
token = request.DATA.get('token', None)
project_id = request.DATA.get('project', None)
if not project_id:
raise exc.WrongArguments(_("The project param is needed"))
importer = AsanaImporter(request.user, token)
try:
users = importer.list_users(project_id)
except exceptions.InvalidRequest:
raise exc.BadRequest(_('Invalid Asana API request'))
except exceptions.FailedRequest:
raise exc.BadRequest(_('Failed to make the request to Asana API'))
for user in users:
if user['detected_user']:
user['user'] = {
'id': user['detected_user'].id,
'full_name': user['detected_user'].get_full_name(),
'gravatar_id': get_user_gravatar_id(user['detected_user']),
'photo': get_user_photo_url(user['detected_user']),
}
                del user['detected_user']
return response.Ok(users)
@list_route(methods=["POST"])
def list_projects(self, request, *args, **kwargs):
self.check_permissions(request, "list_projects", None)
token = request.DATA.get('token', None)
importer = AsanaImporter(request.user, token)
try:
projects = importer.list_projects()
except exceptions.InvalidRequest:
raise exc.BadRequest(_('Invalid Asana API request'))
except exceptions.FailedRequest:
raise exc.BadRequest(_('Failed to make the request to Asana API'))
return response.Ok(projects)
@list_route(methods=["POST"])
def import_project(self, request, *args, **kwargs):
self.check_permissions(request, "import_project", None)
token = request.DATA.get('token', None)
project_id = request.DATA.get('project', None)
if not project_id:
raise exc.WrongArguments(_("The project param is needed"))
options = {
"name": request.DATA.get('name', None),
"description": request.DATA.get('description', None),
"template": request.DATA.get('template', "scrum"),
"users_bindings": resolve_users_bindings(request.DATA.get("users_bindings", {})),
"keep_external_reference": request.DATA.get("keep_external_reference", False),
"is_private": request.DATA.get("is_private", False),
}
if settings.CELERY_ENABLED:
task = tasks.import_project.delay(request.user.id, token, project_id, options)
return response.Accepted({"task_id": task.id})
importer = AsanaImporter(request.user, token)
project = importer.import_project(project_id, options)
project_data = {
"slug": project.slug,
"my_permissions": ["view_us"],
"is_backlog_activated": project.is_backlog_activated,
"is_kanban_activated": project.is_kanban_activated,
}
return response.Ok(project_data)
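    # Illustrative request body for import_project (field names are taken
    # from the reads above; all values are made-up placeholders):
    # {"token": "<asana-oauth-token>", "project": 12345,
    #  "name": "My project", "template": "scrum",
    #  "users_bindings": {"<asana-user-id>": "<taiga-user-id>"},
    #  "keep_external_reference": false, "is_private": false}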
@list_route(methods=["GET"])
def auth_url(self, request, *args, **kwargs):
self.check_permissions(request, "auth_url", None)
url = AsanaImporter.get_auth_url(
settings.IMPORTERS.get('asana', {}).get('app_id', None),
settings.IMPORTERS.get('asana', {}).get('app_secret', None),
settings.IMPORTERS.get('asana', {}).get('callback_url', None)
)
return response.Ok({"url": url})
@list_route(methods=["POST"])
def authorize(self, request, *args, **kwargs):
self.check_permissions(request, "authorize", None)
code = request.DATA.get('code', None)
if code is None:
raise exc.BadRequest(_("Code param needed"))
try:
asana_token = AsanaImporter.get_access_token(
code,
settings.IMPORTERS.get('asana', {}).get('app_id', None),
settings.IMPORTERS.get('asana', {}).get('app_secret', None),
settings.IMPORTERS.get('asana', {}).get('callback_url', None)
)
except exceptions.InvalidRequest:
raise exc.BadRequest(_('Invalid Asana API request'))
except exceptions.FailedRequest:
raise exc.BadRequest(_('Failed to make the request to Asana API'))
return response.Ok({"token": asana_token})
| agpl-3.0 | -5,992,208,784,804,923,000 | 39.569444 | 93 | 0.631462 | false |
htwenhe/DJOA | env/Lib/site-packages/django/conf/locale/cy/formats.py | 504 | 1822 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit | 4,262,840,898,290,709,500 | 46.947368 | 77 | 0.467069 | false |
baruch/libsigrokdecode | decoders/usb_packet/pd.py | 10 | 12962 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2011 Gareth McMullin <[email protected]>
## Copyright (C) 2012-2014 Uwe Hermann <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <pdata>]
<ptype>, <pdata>:
- 'SYNC', <sync>
- 'PID', <pid>
- 'ADDR', <addr>
- 'EP', <ep>
- 'CRC5', <crc5>
- 'CRC16', <crc16>
- 'EOP', <eop>
- 'FRAMENUM', <framenum>
- 'DATABYTE', <databyte>
- 'HUBADDR', <hubaddr>
- 'SC', <sc>
- 'PORT', <port>
- 'S', <s>
- 'E/U', <e/u>
- 'ET', <et>
- 'PACKET', [<pcategory>, <pname>, <pinfo>]
<pcategory>, <pname>, <pinfo>:
- 'TOKEN', 'OUT', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'TOKEN', 'IN', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'TOKEN', 'SOF', [<sync>, <pid>, <framenum>, <crc5>, <eop>]
- 'TOKEN', 'SETUP', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'DATA', 'DATA0', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'DATA', 'DATA1', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'DATA', 'DATA2', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'DATA', 'MDATA', [<sync>, <pid>, <databytes>, <crc16>, <eop>]
- 'HANDSHAKE', 'ACK', [<sync>, <pid>, <eop>]
- 'HANDSHAKE', 'NAK', [<sync>, <pid>, <eop>]
- 'HANDSHAKE', 'STALL', [<sync>, <pid>, <eop>]
- 'HANDSHAKE', 'NYET', [<sync>, <pid>, <eop>]
- 'SPECIAL', 'PRE', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'SPECIAL', 'ERR', [<sync>, <pid>, <eop>]
- 'SPECIAL', 'SPLIT',
[<sync>, <pid>, <hubaddr>, <sc>, <port>, <s>, <e/u>, <et>, <crc5>, <eop>]
- 'SPECIAL', 'PING', [<sync>, <pid>, <addr>, <ep>, <crc5>, <eop>]
- 'SPECIAL', 'Reserved', None
<sync>: SYNC field bitstring, normally '00000001' (8 chars).
<pid>: Packet ID bitstring, e.g. '11000011' for DATA0 (8 chars).
<addr>: Address field number, 0-127 (7 bits).
<ep>: Endpoint number, 0-15 (4 bits).
<crc5>: CRC-5 number (5 bits).
<crc16>: CRC-16 number (16 bits).
<eop>: End of packet marker. List of symbols, usually ['SE0', 'SE0', 'J'].
<framenum>: USB (micro)frame number, 0-2047 (11 bits).
<databyte>: A single data byte, e.g. 0x55.
<databytes>: List of data bytes, e.g. [0x55, 0xaa, 0x99] (0 - 1024 bytes).
<hubaddr>: TODO
<sc>: TODO
<port>: TODO
<s>: TODO
<e/u>: TODO
<et>: TODO
'''
# Packet IDs (PIDs).
# The first 4 bits are the 'packet type' field, the last 4 bits are the
# 'check field' (each bit in the check field must be the inverse of the resp.
# bit in the 'packet type' field; if not, that's a 'PID error').
# For the 4-bit strings, the left-most '1' or '0' is the LSB, i.e. it's sent
# to the bus first.
pids = {
# Tokens
'10000111': ['OUT', 'Address & EP number in host-to-function transaction'],
'10010110': ['IN', 'Address & EP number in function-to-host transaction'],
'10100101': ['SOF', 'Start-Of-Frame marker & frame number'],
'10110100': ['SETUP', 'Address & EP number in host-to-function transaction for SETUP to a control pipe'],
# Data
# Note: DATA2 and MDATA are HS-only.
'11000011': ['DATA0', 'Data packet PID even'],
'11010010': ['DATA1', 'Data packet PID odd'],
'11100001': ['DATA2', 'Data packet PID HS, high bandwidth isosynchronous transaction in a microframe'],
'11110000': ['MDATA', 'Data packet PID HS for split and high-bandwidth isosynchronous transactions'],
# Handshake
'01001011': ['ACK', 'Receiver accepts error-free packet'],
'01011010': ['NAK', 'Receiver cannot accept or transmitter cannot send'],
'01111000': ['STALL', 'EP halted or control pipe request unsupported'],
'01101001': ['NYET', 'No response yet from receiver'],
# Special
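    # Note: PRE and ERR share the same PID value; because the two dict
    # keys below are identical, the 'ERR' entry overwrites 'PRE' here.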
'00111100': ['PRE', 'Host-issued preamble; enables downstream bus traffic to low-speed devices'],
'00111100': ['ERR', 'Split transaction error handshake'],
'00011110': ['SPLIT', 'HS split transaction token'],
'00101101': ['PING', 'HS flow control probe for a bulk/control EP'],
'00001111': ['Reserved', 'Reserved PID'],
}
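# Illustrative helper (not part of the original decoder): a PID bitstring
# is self-consistent when its check field (last 4 bits) is the bitwise
# inverse of its packet type field (first 4 bits).
def _pid_check_ok(pid_bits):
    return all(pid_bits[i] != pid_bits[i + 4] for i in range(4))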
def get_category(pidname):
if pidname in ('OUT', 'IN', 'SOF', 'SETUP'):
return 'TOKEN'
elif pidname in ('DATA0', 'DATA1', 'DATA2', 'MDATA'):
return 'DATA'
elif pidname in ('ACK', 'NAK', 'STALL', 'NYET'):
return 'HANDSHAKE'
else:
return 'SPECIAL'
def ann_index(pidname):
l = ['OUT', 'IN', 'SOF', 'SETUP', 'DATA0', 'DATA1', 'DATA2', 'MDATA',
'ACK', 'NAK', 'STALL', 'NYET', 'PRE', 'ERR', 'SPLIT', 'PING',
'Reserved']
if pidname not in l:
return 28
return l.index(pidname) + 11
def bitstr_to_num(bitstr):
if not bitstr:
return 0
l = list(bitstr)
l.reverse()
return int(''.join(l), 2)
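# Note: bitstrings are LSB first, e.g. bitstr_to_num('1000') == 1 and
# bitstr_to_num('0111') == 14.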
class Decoder(srd.Decoder):
api_version = 2
id = 'usb_packet'
name = 'USB packet'
longname = 'Universal Serial Bus (LS/FS) packet'
desc = 'USB (low-speed and full-speed) packet protocol.'
license = 'gplv2+'
inputs = ['usb_signalling']
outputs = ['usb_packet']
options = (
{'id': 'signalling', 'desc': 'Signalling',
'default': 'full-speed', 'values': ('full-speed', 'low-speed')},
)
annotations = (
('sync-ok', 'SYNC'),
('sync-err', 'SYNC (error)'),
('pid', 'PID'),
('framenum', 'FRAMENUM'),
('addr', 'ADDR'),
('ep', 'EP'),
('crc5-ok', 'CRC5'),
('crc5-err', 'CRC5 (error)'),
('data', 'DATA'),
('crc16-ok', 'CRC16'),
('crc16-err', 'CRC16 (error)'),
('packet-out', 'Packet: OUT'),
('packet-in', 'Packet: IN'),
('packet-sof', 'Packet: SOF'),
('packet-setup', 'Packet: SETUP'),
('packet-data0', 'Packet: DATA0'),
('packet-data1', 'Packet: DATA1'),
('packet-data2', 'Packet: DATA2'),
('packet-mdata', 'Packet: MDATA'),
('packet-ack', 'Packet: ACK'),
('packet-nak', 'Packet: NAK'),
('packet-stall', 'Packet: STALL'),
('packet-nyet', 'Packet: NYET'),
('packet-pre', 'Packet: PRE'),
('packet-err', 'Packet: ERR'),
('packet-split', 'Packet: SPLIT'),
('packet-ping', 'Packet: PING'),
('packet-reserved', 'Packet: Reserved'),
('packet-invalid', 'Packet: Invalid'),
)
annotation_rows = (
('fields', 'Packet fields', tuple(range(10 + 1))),
('packet', 'Packets', tuple(range(11, 28 + 1))),
)
def __init__(self):
self.bits = []
self.packet = []
self.packet_summary = ''
self.ss = self.es = None
self.ss_packet = self.es_packet = None
self.state = 'WAIT FOR SOP'
def putpb(self, data):
self.put(self.ss, self.es, self.out_python, data)
def putb(self, data):
self.put(self.ss, self.es, self.out_ann, data)
def putpp(self, data):
self.put(self.ss_packet, self.es_packet, self.out_python, data)
def putp(self, data):
self.put(self.ss_packet, self.es_packet, self.out_ann, data)
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
def handle_packet(self):
packet = ''
for (bit, ss, es) in self.bits:
packet += bit
# Bits[0:7]: SYNC
sync = packet[:7 + 1]
self.ss, self.es = self.bits[0][1], self.bits[7][2]
# The SYNC pattern for low-speed/full-speed is KJKJKJKK (00000001).
if sync != '00000001':
self.putpb(['SYNC ERROR', sync])
self.putb([1, ['SYNC ERROR: %s' % sync, 'SYNC ERR: %s' % sync,
'SYNC ERR', 'SE', 'S']])
else:
self.putpb(['SYNC', sync])
self.putb([0, ['SYNC: %s' % sync, 'SYNC', 'S']])
self.packet.append(sync)
# Bits[8:15]: PID
pid = packet[8:15 + 1]
pidname = pids.get(pid, (pid, ''))[0]
self.ss, self.es = self.bits[8][1], self.bits[15][2]
self.putpb(['PID', pidname])
self.putb([2, ['PID: %s' % pidname, pidname, pidname[0]]])
self.packet.append(pid)
self.packet_summary += pidname
if pidname in ('OUT', 'IN', 'SOF', 'SETUP', 'PRE', 'PING'):
if pidname == 'SOF':
# Bits[16:26]: Framenum
framenum = bitstr_to_num(packet[16:26 + 1])
self.ss, self.es = self.bits[16][1], self.bits[26][2]
self.putpb(['FRAMENUM', framenum])
self.putb([3, ['Frame: %d' % framenum, 'Frame', 'Fr', 'F']])
self.packet.append(framenum)
self.packet_summary += ' %d' % framenum
else:
# Bits[16:22]: Addr
addr = bitstr_to_num(packet[16:22 + 1])
self.ss, self.es = self.bits[16][1], self.bits[22][2]
self.putpb(['ADDR', addr])
self.putb([4, ['Address: %d' % addr, 'Addr: %d' % addr,
'Addr', 'A']])
self.packet.append(addr)
self.packet_summary += ' ADDR %d' % addr
# Bits[23:26]: EP
ep = bitstr_to_num(packet[23:26 + 1])
self.ss, self.es = self.bits[23][1], self.bits[26][2]
self.putpb(['EP', ep])
self.putb([5, ['Endpoint: %d' % ep, 'EP: %d' % ep, 'EP', 'E']])
self.packet.append(ep)
self.packet_summary += ' EP %d' % ep
# Bits[27:31]: CRC5
crc5 = bitstr_to_num(packet[27:31 + 1])
self.ss, self.es = self.bits[27][1], self.bits[31][2]
self.putpb(['CRC5', crc5])
self.putb([6, ['CRC5: 0x%02X' % crc5, 'CRC5', 'C']])
self.packet.append(crc5)
elif pidname in ('DATA0', 'DATA1', 'DATA2', 'MDATA'):
# Bits[16:packetlen-16]: Data
data = packet[16:-16]
# TODO: len(data) must be a multiple of 8.
databytes = []
self.packet_summary += ' ['
for i in range(0, len(data), 8):
db = bitstr_to_num(data[i:i + 8])
self.ss, self.es = self.bits[16 + i][1], self.bits[23 + i][2]
self.putpb(['DATABYTE', db])
self.putb([8, ['Databyte: %02X' % db, 'Data: %02X' % db,
'DB: %02X' % db, '%02X' % db]])
databytes.append(db)
self.packet_summary += ' %02X' % db
data = data[8:]
self.packet_summary += ' ]'
# Convenience Python output (no annotation) for all bytes together.
self.ss, self.es = self.bits[16][1], self.bits[-16][2]
self.putpb(['DATABYTES', databytes])
self.packet.append(databytes)
# Bits[packetlen-16:packetlen]: CRC16
crc16 = bitstr_to_num(packet[-16:])
self.ss, self.es = self.bits[-16][1], self.bits[-1][2]
self.putpb(['CRC16', crc16])
self.putb([9, ['CRC16: 0x%04X' % crc16, 'CRC16', 'C']])
self.packet.append(crc16)
elif pidname in ('ACK', 'NAK', 'STALL', 'NYET', 'ERR'):
pass # Nothing to do, these only have SYNC+PID+EOP fields.
else:
pass # TODO: Handle 'SPLIT' and possibly 'Reserved' packets.
# Output a (summary of) the whole packet.
pcategory, pname, pinfo = get_category(pidname), pidname, self.packet
self.putpp(['PACKET', [pcategory, pname, pinfo]])
self.putp([ann_index(pidname), ['%s' % self.packet_summary]])
self.packet, self.packet_summary = [], ''
def decode(self, ss, es, data):
(ptype, pdata) = data
# We only care about certain packet types for now.
if ptype not in ('SOP', 'BIT', 'EOP'):
return
# State machine.
if self.state == 'WAIT FOR SOP':
if ptype != 'SOP':
return
self.ss_packet = ss
self.state = 'GET BIT'
elif self.state == 'GET BIT':
if ptype == 'BIT':
self.bits.append([pdata, ss, es])
elif ptype == 'EOP':
self.es_packet = es
self.handle_packet()
self.bits, self.state = [], 'WAIT FOR SOP'
else:
pass # TODO: Error
| gpl-3.0 | -2,096,504,582,498,325,200 | 37.235988 | 109 | 0.534794 | false |
spennihana/h2o-3 | h2o-py/tests/testdir_hdfs/pyunit_NOFEATURE_INTERNAL_HDFS_import_folder_csv_orc_same_milsongs.py | 4 | 1316 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
import time
from tests import pyunit_utils
#----------------------------------------------------------------------
# This test is used to show what happens if we split the same datasets
# into one part csv, one part orc
#----------------------------------------------------------------------
def hdfs_orc_parser():
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
hadoop_namenode_is_accessible = pyunit_utils.hadoop_namenode_is_accessible()
if hadoop_namenode_is_accessible:
hdfs_name_node = pyunit_utils.hadoop_namenode()
if pyunit_utils.cannaryHDFSTest(hdfs_name_node, "/datasets/orc_parser/orc/orc_split_elim.orc"):
print("Your hive-exec version is too old. Orc parser test {0} is "
"skipped.".format("pyunit_INTERNAL_HDFS_import_folder_orc.py"))
pass
else:
mix_folder = "/datasets/csv_orc_same_milsongs"
url_csv1 = "hdfs://{0}{1}".format(hdfs_name_node, mix_folder)
multi_file_mixed = h2o.import_file(url_csv1)
else:
        raise EnvironmentError("Hadoop namenode is not accessible")
if __name__ == "__main__":
pyunit_utils.standalone_test(hdfs_orc_parser)
else:
hdfs_orc_parser() | apache-2.0 | -2,969,104,315,063,898,000 | 34.594595 | 103 | 0.582827 | false |
mayapurmedia/tovp | tovp/contributions/migrations/0030_auto_20150514_1332.py | 2 | 1154 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contributions', '0029_auto_20150514_1331'),
]
operations = [
migrations.AlterModelOptions(
name='contribution',
options={'permissions': (('can_edit_completed', 'Can edit completed'), ('can_deposit', 'Can deposit'))},
),
migrations.AddField(
model_name='contribution',
name='deposited_status',
field=models.CharField(choices=[('not-deposited', 'Not deposited'), ('ready-to-deposit', 'Ready to deposit'), ('deposited', 'Deposited')], verbose_name='Is Deposited', default='not-deposited', max_length=20),
preserve_default=True,
),
migrations.AddField(
model_name='contribution',
name='deposited_status_changed',
field=model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='deposited_status'),
preserve_default=True,
),
]
| mit | 7,606,827,020,279,090,000 | 35.0625 | 220 | 0.621317 | false |
abdellatifkarroum/odoo | addons/crm_partner_assign/wizard/__init__.py | 389 | 1038 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_forward_to_partner
import crm_channel_interested | agpl-3.0 | -9,222,829,374,188,990,000 | 44.173913 | 78 | 0.612717 | false |
mfalaize/carnet-entretien | compta/forms.py | 2 | 3602 | from django import forms
from django.utils.translation import ugettext_lazy as _
from compta.models import Budget, OperationEpargne, Operation, CategorieEpargne
class BudgetForm(forms.ModelForm):
class Meta:
model = Budget
fields = ['categorie', 'compte_associe', 'budget', 'solde_en_une_fois']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['budget'].widget.attrs['autofocus'] = True
class OperationCategoriesForm(forms.Form):
operation_id = forms.IntegerField(required=True, widget=forms.HiddenInput())
categorie = forms.ChoiceField(required=False, choices=(("", ""),))
redirect = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput())
def __init__(self, post, render_initial=True):
if render_initial:
super().__init__()
else:
super().__init__(post)
operation = post.get('operation')
categories_epargne = post.get('categories_epargne')
redirect = post.get('redirect')
operation_id = post.get('operation_id')
if redirect is not None:
self.fields['redirect'].initial = redirect
if redirect:
self.fields['categorie'].widget = forms.HiddenInput()
self.fields['categorie'].initial = ""
if operation is None and operation_id is not None:
if render_initial:
self.fields['operation_id'].initial = post.get('operation_id')
else:
operation = Operation.objects.get(pk=int(operation_id))
if operation is not None:
operation.load_categorie()
if operation.categorie_id is not None:
self.fields['categorie'].initial = operation.categorie_id
self.fields['operation_id'].initial = operation.pk
if operation.compte.epargne:
self.fields['categorie'].choices += (("-1", _("Partagé entre les différentes catégories")),)
if categories_epargne is None:
categories_epargne = CategorieEpargne.objects.all().order_by('libelle')
for categorie in categories_epargne:
self.fields['categorie'].choices += ((str(categorie.pk).replace(" ", ""), categorie.libelle),)
else:
self.fields['categorie'].choices += (("-1", _("Hors Budget")),)
self.fields['categorie'].choices += (("-2", _("Revenue")),)
self.fields['categorie'].choices += (("-3", _("Avance sur débit(s) futur(s)")),)
if operation.compte.utilisateurs.count() > 1:
for utilisateur in operation.compte.utilisateurs.all():
self.fields['categorie'].choices += (("c" + str(-1000 - utilisateur.pk).replace(' ', ''),
_("Contribution") + " " + utilisateur.first_name),)
for utilisateur in operation.compte.utilisateurs.all():
self.fields['categorie'].choices += (("a" + str(-1000 - utilisateur.pk).replace(' ', ''), _(
"Contribution (avances)") + " " + utilisateur.first_name),)
for budget in operation.compte.budget_set.all():
self.fields['categorie'].choices += ((budget.pk, budget.categorie.libelle),)
else:
self.fields['operation_id'].initial = post.get('operation_id')
self.fields['categorie'].choices = sorted(self.fields['categorie'].choices, key=lambda x: x[1])
| gpl-3.0 | 3,203,191,791,390,964,700 | 47.621622 | 116 | 0.573096 | false |
dyrock/trafficserver | tests/gold_tests/thread_config/thread_32_0.test.py | 2 | 2467 | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
Test.Summary = 'Test that Trafficserver starts with different thread configurations.'
Test.ContinueOnFail = True
ts = Test.MakeATSProcess('ts')
server = Test.MakeOriginServer('server')
Test.testName = ''
request_header = {
'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
response_header = {
'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
server.addResponse("sessionfile.log", request_header, response_header)
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 32,
'proxy.config.accept_threads': 0,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'})
ts.Disk.remap_config.AddLine(
'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Setup.CopyAs('check_threads.py', Test.RunDirectory)
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy http://127.0.0.1:{0} http://www.example.com -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'python3 check_threads.py -t {0} -e {1} -a {2}'.format(ts.Env['TS_ROOT'], 32, 0)
tr.Processes.Default.ReturnCode = 0
| apache-2.0 | 3,944,221,662,348,823,000 | 36.378788 | 159 | 0.724362 | false |
erwilan/ansible | contrib/inventory/ovirt4.py | 70 | 7682 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
oVirt dynamic inventory script
=================================
Generates dynamic inventory file for oVirt.
Script will return following attributes for each virtual machine:
- id
- name
- host
- cluster
- status
- description
- fqdn
- os_type
- template
- tags
- statistics
- devices
When run in --list mode, virtual machines are grouped by the following categories:
- cluster
- tag
- status
Note: If a virtual machine has more than one tag, it will appear in each of the
corresponding tag records.
Examples:
# Execute update of system on webserver virtual machine:
$ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
# Get webserver virtual machine information:
$ contrib/inventory/ovirt4.py --host webserver
Author: Ondra Machacek (@machacekondra)
"""
import argparse
import os
import sys
from collections import defaultdict
try:
import ConfigParser as configparser
except ImportError:
import configparser
try:
import json
except ImportError:
import simplejson as json
try:
import ovirtsdk4 as sdk
import ovirtsdk4.types as otypes
except ImportError:
print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
sys.exit(1)
def parse_args():
"""
Create command line parser for oVirt dynamic inventory script.
"""
parser = argparse.ArgumentParser(
description='Ansible dynamic inventory script for oVirt.',
)
parser.add_argument(
'--list',
action='store_true',
default=True,
help='Get data of all virtual machines (default: True).',
)
parser.add_argument(
'--host',
help='Get data of virtual machines running on specified host.',
)
parser.add_argument(
'--pretty',
action='store_true',
default=False,
help='Pretty format (default: False).',
)
return parser.parse_args()
def create_connection():
"""
Create a connection to oVirt engine API.
"""
# Get the path of the configuration file, by default use
# 'ovirt.ini' file in script directory:
default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'ovirt.ini',
)
config_path = os.environ.get('OVIRT_INI_PATH', default_path)
# Create parser and add ovirt section if it doesn't exist:
config = configparser.SafeConfigParser(
defaults={
'ovirt_url': None,
'ovirt_username': None,
'ovirt_password': None,
'ovirt_ca_file': None,
}
)
if not config.has_section('ovirt'):
config.add_section('ovirt')
config.read(config_path)
# Create a connection with options defined in ini file:
return sdk.Connection(
url=config.get('ovirt', 'ovirt_url'),
username=config.get('ovirt', 'ovirt_username'),
password=config.get('ovirt', 'ovirt_password'),
ca_file=config.get('ovirt', 'ovirt_ca_file'),
insecure=config.get('ovirt', 'ovirt_ca_file') is None,
)
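# Illustrative ovirt.ini (keys match those read above; values are placeholders,
# not real credentials):
#
#   [ovirt]
#   ovirt_url = https://engine.example.com/ovirt-engine/api
#   ovirt_username = admin@internal
#   ovirt_password = secret
#   ovirt_ca_file = /etc/pki/ovirt-engine/ca.pem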
def get_dict_of_struct(connection, vm):
"""
Transform SDK Vm Struct type to Python dictionary.
"""
if vm is None:
return dict()
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
vm_service = vms_service.vm_service(vm.id)
devices = vm_service.reported_devices_service().list()
tags = vm_service.tags_service().list()
stats = vm_service.statistics_service().list()
labels = vm_service.affinity_labels_service().list()
groups = clusters_service.cluster_service(
vm.cluster.id
).affinity_groups_service().list()
return {
'id': vm.id,
'name': vm.name,
'host': connection.follow_link(vm.host).name if vm.host else None,
'cluster': connection.follow_link(vm.cluster).name,
'status': str(vm.status),
'description': vm.description,
'fqdn': vm.fqdn,
'os_type': vm.os.type,
'template': connection.follow_link(vm.template).name,
'tags': [tag.name for tag in tags],
'affinity_labels': [label.name for label in labels],
'affinity_groups': [
group.name for group in groups
            if vm.name in [v.name for v in connection.follow_link(group.vms)]
],
'statistics': dict(
(stat.name, stat.values[0].datum) for stat in stats
),
'devices': dict(
(device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
),
'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
}
def get_data(connection, vm_name=None):
"""
Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
"""
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
if vm_name:
vm = vms_service.list(search='name=%s' % vm_name) or [None]
data = get_dict_of_struct(
connection=connection,
vm=vm[0],
)
else:
vms = dict()
data = defaultdict(list)
for vm in vms_service.list():
name = vm.name
vm_service = vms_service.vm_service(vm.id)
cluster_service = clusters_service.cluster_service(vm.cluster.id)
# Add vm to vms dict:
vms[name] = get_dict_of_struct(connection, vm)
# Add vm to cluster group:
cluster_name = connection.follow_link(vm.cluster).name
data['cluster_%s' % cluster_name].append(name)
# Add vm to tag group:
tags_service = vm_service.tags_service()
for tag in tags_service.list():
data['tag_%s' % tag.name].append(name)
# Add vm to status group:
data['status_%s' % vm.status].append(name)
# Add vm to affinity group:
for group in cluster_service.affinity_groups_service().list():
if vm.name in [
v.name for v in connection.follow_link(group.vms)
]:
data['affinity_group_%s' % group.name].append(vm.name)
# Add vm to affinity label group:
affinity_labels_service = vm_service.affinity_labels_service()
for label in affinity_labels_service.list():
data['affinity_label_%s' % label.name].append(name)
data["_meta"] = {
'hostvars': vms,
}
return data
def main():
args = parse_args()
connection = create_connection()
print(
json.dumps(
obj=get_data(
connection=connection,
vm_name=args.host,
),
sort_keys=args.pretty,
indent=args.pretty * 2,
)
)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,245,048,312,908,488,000 | 28.320611 | 96 | 0.611299 | false |
ThinkOpen-Solutions/odoo | addons/account/project/wizard/account_analytic_chart.py | 362 | 2100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_chart(osv.osv_memory):
_name = 'account.analytic.chart'
_description = 'Account Analytic Chart'
_columns = {
'from_date': fields.date('From'),
'to_date': fields.date('To'),
}
def analytic_account_chart_open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result_context = {}
if context is None:
context = {}
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_analytic_account_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
data = self.read(cr, uid, ids, [])[0]
if data['from_date']:
result_context.update({'from_date': data['from_date']})
if data['to_date']:
result_context.update({'to_date': data['to_date']})
result['context'] = str(result_context)
return result
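    # Illustrative result (hypothetical dates): with from_date=2014-01-01 and
    # to_date=2014-12-31 the action is returned with context
    # "{'from_date': '2014-01-01', 'to_date': '2014-12-31'}", which the
    # analytic account tree view can use to filter the displayed balances.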
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 504,554,301,668,686,700 | 41.857143 | 106 | 0.600476 | false |
darktears/chromium-crosswalk | tools/grit/grit/tool/diff_structures.py | 62 | 3923 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit sdiff' tool.
'''
import os
import getopt
import tempfile
from grit.node import structure
from grit.tool import interface
from grit import constants
from grit import util
# Builds the description for the tool (used as the __doc__
# for the DiffStructures class).
_class_doc = """\
Allows you to view the differences in the structure of two files,
disregarding their translateable content. Translateable portions of
each file are changed to the string "TTTTTT" before invoking the diff program
specified by the P4DIFF environment variable.
Usage: grit sdiff [-t TYPE] [-s SECTION] [-e ENCODING] LEFT RIGHT
LEFT and RIGHT are the files you want to diff. SECTION is required
for structure types like 'dialog' to identify the part of the file to look at.
ENCODING indicates the encoding of the left and right files (default 'cp1252').
TYPE can be one of the following, defaults to 'tr_html':
"""
for gatherer in structure._GATHERERS:
_class_doc += " - %s\n" % gatherer
class DiffStructures(interface.Tool):
__doc__ = _class_doc
def __init__(self):
self.section = None
self.left_encoding = 'cp1252'
self.right_encoding = 'cp1252'
self.structure_type = 'tr_html'
def ShortDescription(self):
return 'View differences without regard for translateable portions.'
def Run(self, global_opts, args):
(opts, args) = getopt.getopt(args, 's:e:t:',
['left_encoding=', 'right_encoding='])
for key, val in opts:
if key == '-s':
self.section = val
elif key == '-e':
self.left_encoding = val
self.right_encoding = val
elif key == '-t':
self.structure_type = val
elif key == '--left_encoding':
self.left_encoding = val
elif key == '--right_encoding':
        self.right_encoding = val
if len(args) != 2:
print "Incorrect usage - 'grit help sdiff' for usage details."
return 2
if 'P4DIFF' not in os.environ:
print "Environment variable P4DIFF not set; defaulting to 'windiff'."
diff_program = 'windiff'
else:
diff_program = os.environ['P4DIFF']
left_trans = self.MakeStaticTranslation(args[0], self.left_encoding)
try:
try:
right_trans = self.MakeStaticTranslation(args[1], self.right_encoding)
os.system('%s %s %s' % (diff_program, left_trans, right_trans))
finally:
os.unlink(right_trans)
finally:
os.unlink(left_trans)
def MakeStaticTranslation(self, original_filename, encoding):
"""Given the name of the structure type (self.structure_type), the filename
of the file holding the original structure, and optionally the "section" key
identifying the part of the file to look at (self.section), creates a
temporary file holding a "static" translation of the original structure
(i.e. one where all translateable parts have been replaced with "TTTTTT")
and returns the temporary file name. It is the caller's responsibility to
delete the file when finished.
Args:
original_filename: 'c:\\bingo\\bla.rc'
Return:
'c:\\temp\\werlkjsdf334.tmp'
"""
original = structure._GATHERERS[self.structure_type](original_filename,
extkey=self.section,
encoding=encoding)
original.Parse()
translated = original.Translate(constants.CONSTANT_LANGUAGE, False)
fname = tempfile.mktemp()
with util.WrapOutputStream(open(fname, 'w')) as writer:
writer.write("Original filename: %s\n=============\n\n"
% original_filename)
writer.write(translated) # write in UTF-8
return fname
| bsd-3-clause | 4,675,875,689,353,524,000 | 33.412281 | 80 | 0.654091 | false |
hequn8128/flink | flink-python/pyflink/table/sources.py | 10 | 2202 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, _to_java_type
from pyflink.util import utils
__all__ = ['TableSource', 'CsvTableSource']
class TableSource(object):
"""
Defines a table from an external system or location.
"""
def __init__(self, j_table_source):
self._j_table_source = j_table_source
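# Illustrative usage of the CsvTableSource defined below (assumes
# ``from pyflink.table import DataTypes`` and an existing TableEnvironment
# ``t_env``):
#
#   source = CsvTableSource("/path/to/input.csv",
#                           ["word", "count"],
#                           [DataTypes.STRING(), DataTypes.INT()])
#   t_env.register_table_source("words", source)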
class CsvTableSource(TableSource):
"""
A :class:`TableSource` for simple CSV files with a
(logically) unlimited number of fields.
:param source_path: The path to the CSV file.
:param field_names: The names of the table fields.
:param field_types: The types of the table fields.
"""
def __init__(self, source_path, field_names, field_types):
# type: (str, list[str], list[DataType]) -> None
gateway = get_gateway()
j_field_names = utils.to_jarray(gateway.jvm.String, field_names)
j_field_types = utils.to_jarray(gateway.jvm.TypeInformation,
[_to_java_type(field_type)
for field_type in field_types])
super(CsvTableSource, self).__init__(
gateway.jvm.CsvTableSource(source_path, j_field_names, j_field_types))
| apache-2.0 | -5,193,428,207,122,846,000 | 40.54717 | 82 | 0.629428 | false |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/test/test_netrc.py | 50 | 4636 | import netrc, os, unittest, sys, textwrap
from test import test_support
temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
def make_nrc(self, test_data):
test_data = textwrap.dedent(test_data)
mode = 'w'
if sys.platform != 'cygwin':
mode += 't'
with open(temp_filename, mode) as fp:
fp.write(test_data)
self.addCleanup(os.unlink, temp_filename)
return netrc.netrc(temp_filename)
def test_default(self):
nrc = self.make_nrc("""\
machine host1.domain.com login log1 password pass1 account acct1
default login log2 password pass2
""")
self.assertEqual(nrc.hosts['host1.domain.com'],
('log1', 'acct1', 'pass1'))
self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))
def test_macros(self):
nrc = self.make_nrc("""\
macdef macro1
line1
line2
macdef macro2
line3
line4
""")
self.assertEqual(nrc.macros, {'macro1': ['line1\n', 'line2\n'],
'macro2': ['line3\n', 'line4\n']})
def _test_passwords(self, nrc, passwd):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['host.domain.com'], ('log', 'acct', passwd))
def test_password_with_leading_hash(self):
self._test_passwords("""\
machine host.domain.com login log password #pass account acct
""", '#pass')
def test_password_with_trailing_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pass# account acct
""", 'pass#')
def test_password_with_internal_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pa#ss account acct
""", 'pa#ss')
def _test_comment(self, nrc, passwd='pass'):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['foo.domain.com'], ('bar', None, passwd))
self.assertEqual(nrc.hosts['bar.domain.com'], ('foo', None, 'pass'))
def test_comment_before_machine_line(self):
self._test_comment("""\
# comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_no_space(self):
self._test_comment("""\
#comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_hash_only(self):
self._test_comment("""\
#
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line(self):
self._test_comment("""\
machine foo.domain.com login bar password pass # comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_no_space(self):
self._test_comment("""\
machine foo.domain.com login bar password pass #comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_pass_has_hash(self):
self._test_comment("""\
machine foo.domain.com login bar password #pass #comment
machine bar.domain.com login foo password pass
""", '#pass')
@unittest.skipUnless(os.name == 'posix', 'POSIX only test')
def test_security(self):
# This test is incomplete since we are normally not run as root and
# therefore can't test the file ownership being wrong.
d = test_support.TESTFN
os.mkdir(d)
self.addCleanup(test_support.rmtree, d)
fn = os.path.join(d, '.netrc')
with open(fn, 'wt') as f:
f.write("""\
machine foo.domain.com login bar password pass
default login foo password pass
""")
with test_support.EnvironmentVarGuard() as environ:
environ.set('HOME', d)
os.chmod(fn, 0600)
nrc = netrc.netrc()
self.assertEqual(nrc.hosts['foo.domain.com'],
('bar', None, 'pass'))
os.chmod(fn, 0o622)
self.assertRaises(netrc.NetrcParseError, netrc.netrc)
def test_main():
test_support.run_unittest(NetrcTestCase)
if __name__ == "__main__":
test_main()
| mit | 8,679,122,582,430,099,000 | 34.661538 | 79 | 0.566221 | false |
AndresVillan/pyafipws | formatos/formato_sql.py | 4 | 15333 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Module for handling SQL files"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2014 Mariano Reingart"
__license__ = "GPL 3.0"
from decimal import Decimal
DEBUG = False
CAE_NULL = None
FECHA_VTO_NULL = None
RESULTADO_NULL = None
NULL = None

# assumption: text columns read from SQLite use this module's own latin-1
# encoding (CHARSET is referenced by leer() below but was not defined here)
CHARSET = "latin1"
def esquema_sql(tipos_registro, conf={}):
from formato_txt import A, N, I
for tabla, formato in tipos_registro:
sql = []
sql.append("CREATE TABLE %s (" % tabla)
if tabla!='encabezado':
# agrego id como fk
id = [('id', 15, N)]
else:
id = []
for (clave, longitud, tipo) in id+formato:
clave_orig = clave
if conf:
if tabla == 'encabezado':
clave = conf["encabezado"].get(clave, clave)
if tabla == 'detalle':
clave = conf["detalle"].get(clave, clave)
if tabla == 'iva':
clave = conf["iva"].get(clave, clave)
if tabla == 'tributo':
clave = conf["tributo"].get(clave, clave)
if tabla == 'cmp_asoc':
clave = conf["cmp_asoc"].get(clave, clave)
if tabla == 'permiso':
clave = conf["permiso"].get(clave, clave)
if isinstance(longitud, (tuple, list)):
longitud, decimales = longitud
else:
decimales = 2
sql.append (" %s %s %s%s%s" % (
clave,
{N: 'INTEGER', I: 'NUMERIC', A: 'VARCHAR'}[tipo],
{I: "(%s, %s)" % (longitud, decimales), A: '(%s)' % longitud, N: ''}[tipo],
clave == 'id' and (tabla=='encabezado' and " PRIMARY KEY" or " FOREING KEY encabezado") or "",
formato[-1][0]!=clave_orig and "," or ""))
sql.append(")")
sql.append(";")
if DEBUG: print '\n'.join(sql)
yield '\n'.join(sql)
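# Illustrative mapping used above: a field spec (clave, longitud, tipo)
# becomes "clave VARCHAR(longitud)" for A, "clave INTEGER" for N, and
# "clave NUMERIC(longitud, decimales)" for I (decimales defaults to 2).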
def configurar(schema):
tablas = {}
campos = {}
campos_rev = {}
if not schema:
for tabla in "encabezado", "detalle", "cmp_asoc", "permiso", "tributo", "iva":
tablas[tabla] = tabla
campos[tabla] = {"id": "id"}
campos_rev[tabla] = dict([(v, k) for k, v in campos[tabla].items()])
return tablas, campos, campos_rev
def ejecutar(cur, sql, params=None):
    if DEBUG: print sql, params
if params is None:
return cur.execute(sql)
else:
return cur.execute(sql, params)
def max_id(db, schema={}):
cur = db.cursor()
tablas, campos, campos_rev = configurar(schema)
query = ("SELECT MAX(%%(id)s) FROM %(encabezado)s" % tablas) % campos["encabezado"]
if DEBUG: print "ejecutando",query
ret = None
try:
ejecutar(cur, query)
for row in cur:
ret = row[0]
if not ret:
ret = 0
        if DEBUG: print "MAX_ID = ", ret
return ret
finally:
cur.close()
def redondear(formato, clave, valor):
from formato_txt import A, N, I
    # fix rounding (sqlite apparently does not store Decimal values exactly)
import decimal
long = [fmt[1] for fmt in formato if fmt[0]==clave]
tipo = [fmt[2] for fmt in formato if fmt[0]==clave]
if not tipo:
return valor
tipo = tipo[0]
if DEBUG: print "tipo", tipo, clave, valor, long
if valor is None:
return None
if valor == "":
return ""
if tipo == A:
return valor
if tipo == N:
return int(valor)
if isinstance(valor, (int, float)):
valor = str(valor)
if isinstance(valor, basestring):
valor = Decimal(valor)
if long and isinstance(long[0], (tuple, list)):
decimales = Decimal('1') / Decimal(10**(long[0][1]))
else:
decimales = Decimal('.01')
valor1 = valor.quantize(decimales, rounding=decimal.ROUND_DOWN)
if valor != valor1 and DEBUG:
print "REDONDEANDO ", clave, decimales, valor, valor1
return valor1
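# Illustrative example (assumes a formato_txt-style spec such as
# [('imp_total', (15, 3), I)]): redondear(fmt, 'imp_total', '1.2345')
# returns Decimal('1.234') -- values are truncated (ROUND_DOWN), not rounded.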
def escribir(facts, db, schema={}, commit=True):
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, PERMISO, DATO
tablas, campos, campos_rev = configurar(schema)
cur = db.cursor()
try:
for dic in facts:
            if 'id' not in dic:
                dic['id'] = max_id(db, schema=schema) + 1
query = "INSERT INTO %(encabezado)s (%%s) VALUES (%%s)" % tablas
fields = ','.join([campos["encabezado"].get(k, k) for k,t,n in ENCABEZADO if k in dic])
values = ','.join(['?' for k,t,n in ENCABEZADO if k in dic])
if DEBUG: print "Ejecutando2: %s %s" % (query % (fields, values), [dic[k] for k,t,n in ENCABEZADO if k in dic])
ejecutar(cur, query % (fields, values), [dic[k] for k,t,n in ENCABEZADO if k in dic])
query = ("INSERT INTO %(detalle)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["detalle"]
for item in dic['detalles']:
fields = ','.join([campos["detalle"].get(k, k) for k,t,n in DETALLE if k in item])
values = ','.join(['?' for k,t,n in DETALLE if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in DETALLE if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in DETALLE if k in item])
if 'cbtes_asoc' in dic and tablas["cmp_asoc"]:
query = ("INSERT INTO %(cmp_asoc)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["cmp_asoc"]
for item in dic['cbtes_asoc']:
fields = ','.join([campos["cmp_asoc"].get(k, k) for k,t,n in CMP_ASOC if k in item])
values = ','.join(['?' for k,t,n in CMP_ASOC if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in CMP_ASOC if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in CMP_ASOC if k in item])
if 'permisos' in dic:
query = ("INSERT INTO %(permiso)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["permiso"]
for item in dic['permisos']:
fields = ','.join([campos["permiso"].get(k, k) for k,t,n in PERMISO if k in item])
values = ','.join(['?' for k,t,n in PERMISO if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in PERMISO if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in PERMISO if k in item])
if 'tributos' in dic:
query = ("INSERT INTO %(tributo)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["tributo"]
for item in dic['tributos']:
fields = ','.join([campos["tributo"].get(k, k) for k,t,n in TRIBUTO if k in item])
values = ','.join(['?' for k,t,n in TRIBUTO if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in TRIBUTO if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in TRIBUTO if k in item])
if 'ivas' in dic:
query = ("INSERT INTO %(iva)s (%%(id)s, %%%%s) VALUES (?, %%%%s)" % tablas) % campos["iva"]
for item in dic['ivas']:
fields = ','.join([campos["iva"].get(k, k) for k,t,n in IVA if k in item])
values = ','.join(['?' for k,t,n in IVA if k in item])
if DEBUG: print "Ejecutando: %s %s" % (query % (fields, values), [dic['id']] + [item[k] for k,t,n in IVA if k in item])
ejecutar(cur, query % (fields, values), [dic['id']] + [item[k] for k,t,n in IVA if k in item])
if commit:
db.commit()
finally:
pass
def modificar(fact, db, schema={}, webservice="wsfev1", ids=None, conf_db={}):
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, PERMISO, DATO
update = ['cae', 'fecha_vto', 'resultado', 'reproceso', 'motivo_obs', 'err_code', 'err_msg', 'cbte_nro']
tablas, campos, campos_rev = configurar(schema)
cur = db.cursor()
if fact['cae']=='NULL' or fact['cae']=='' or fact['cae']==None:
fact['cae'] = CAE_NULL
fact['fecha_vto'] = FECHA_VTO_NULL
    if 'null' in conf_db and (fact['resultado'] is None or fact['resultado'] == ''):
fact['resultado'] = RESULTADO_NULL
for k in ['reproceso', 'motivo_obs', 'err_code', 'err_msg']:
        if 'null' in conf_db and k in fact and (fact[k] is None or fact[k] == ''):
if DEBUG: print k, "NULL"
fact[k] = NULL
try:
query = ("UPDATE %(encabezado)s SET %%%%s WHERE %%(id)s=?" % tablas) % campos["encabezado"]
fields = [campos["encabezado"].get(k, k) for k,t,n in ENCABEZADO if k in update and k in fact]
values = [fact[k] for k,t,n in ENCABEZADO if k in update and k in fact]
query = query % ','.join(["%s=?" % f for f in fields])
if DEBUG: print query, values+[fact['id']]
ejecutar(cur, query, values+[fact['id']] )
db.commit()
except:
raise
finally:
pass
def leer(db, schema={}, webservice="wsfev1", ids=None):
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, PERMISO, DATO
tablas, campos, campos_rev = configurar(schema)
cur = db.cursor()
if not ids:
query = ("SELECT * FROM %(encabezado)s WHERE (%%(resultado)s IS NULL OR %%(resultado)s='' OR %%(resultado)s=' ') AND (%%(id)s IS NOT NULL) AND %%(webservice)s=? ORDER BY %%(tipo_cbte)s, %%(punto_vta)s, %%(cbte_nro)s" % tablas) % campos["encabezado"]
ids = [webservice]
else:
query = ("SELECT * FROM %(encabezado)s WHERE " % tablas) + " OR ".join(["%(id)s=?" % campos["encabezado"] for id in ids])
if DEBUG: print "ejecutando",query, ids
try:
ejecutar(cur, query, ids)
rows = cur.fetchall()
description = cur.description
for row in rows:
detalles = []
encabezado = {}
for i, k in enumerate(description):
val = row[i]
if isinstance(val,str):
val = val.decode(CHARSET)
if isinstance(val,basestring):
val = val.strip()
key = campos_rev["encabezado"].get(k[0], k[0].lower())
val = redondear(ENCABEZADO, key, val)
encabezado[key] = val
            if DEBUG: print encabezado
detalles = []
if DEBUG: print ("SELECT * FROM %(detalle)s WHERE %%(id)s = ?" % tablas) % campos["detalle"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(detalle)s WHERE %%(id)s = ?" % tablas) % campos["detalle"], [encabezado['id']])
for it in cur.fetchall():
detalle = {}
for i, k in enumerate(cur.description):
val = it[i]
if isinstance(val,str):
val = val.decode(CHARSET)
key = campos_rev["detalle"].get(k[0], k[0].lower())
val = redondear(DETALLE, key, val)
detalle[key] = val
detalles.append(detalle)
encabezado['detalles'] = detalles
cmps_asoc = []
if DEBUG: print ("SELECT * FROM %(cmp_asoc)s WHERE %%(id)s = ?" % tablas) % campos["cmp_asoc"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(cmp_asoc)s WHERE %%(id)s = ?" % tablas) % campos["cmp_asoc"], [encabezado['id']])
for it in cur.fetchall():
cmp_asoc = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["cmp_asoc"].get(k[0], k[0].lower())
cmp_asoc[key] = val
cmps_asoc.append(cmp_asoc)
if cmps_asoc:
encabezado['cbtes_asoc'] = cmps_asoc
permisos = []
if DEBUG: print ("SELECT * FROM %(permiso)s WHERE %%(id)s = ?" % tablas) % campos["permiso"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(permiso)s WHERE %%(id)s = ?" % tablas) % campos["permiso"], [encabezado['id']])
for it in cur.fetchall():
permiso = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["permiso"].get(k[0], k[0].lower())
permiso[key] = val
permisos.append(permiso)
if permisos:
encabezado['permisos'] = permisos
ivas = []
if DEBUG: print ("SELECT * FROM %(iva)s WHERE %%(id)s = ?" % tablas) % campos["iva"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(iva)s WHERE %%(id)s = ?" % tablas) % campos["iva"], [encabezado['id']])
for it in cur.fetchall():
iva = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["iva"].get(k[0], k[0].lower())
val = redondear(IVA, key, val)
iva[key] = val
ivas.append(iva)
if ivas:
encabezado['ivas'] = ivas
tributos = []
if DEBUG: print ("SELECT * FROM %(tributo)s WHERE %%(id)s = ?" % tablas) % campos["tributo"], [encabezado['id']]
ejecutar(cur, ("SELECT * FROM %(tributo)s WHERE %%(id)s = ?" % tablas) % campos["tributo"], [encabezado['id']])
for it in cur.fetchall():
tributo = {}
for i, k in enumerate(cur.description):
val = it[i]
key = campos_rev["tributo"].get(k[0], k[0].lower())
val = redondear(TRIBUTO, key, val)
tributo[key] = val
tributos.append(tributo)
if tributos:
encabezado['tributos'] = tributos
yield encabezado
db.commit()
finally:
cur.close()
def ayuda():
print "-- Formato:"
from formato_txt import ENCABEZADO, DETALLE, TRIBUTO, IVA, CMP_ASOC, DATO, PERMISO
tipos_registro = [
('encabezado', ENCABEZADO),
('detalle', DETALLE),
('tributo', TRIBUTO),
('iva', IVA),
('cmp_asoc', CMP_ASOC),
('permiso', PERMISO),
('dato', DATO),
]
print "-- Esquema:"
for sql in esquema_sql(tipos_registro):
print sql
if __name__ == "__main__":
ayuda()
| gpl-3.0 | -4,232,584,886,085,562,000 | 44.230088 | 257 | 0.515294 | false |
diagramsoftware/odoo | addons/edi/models/__init__.py | 442 | 1116 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import edi
import res_partner
import res_company
import res_currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,092,458,624,888,739,300 | 40.333333 | 78 | 0.62724 | false |
kermitfr/kermit-webui | src/webui/platforms/oc4j/applications.py | 1 | 2735 | '''
Created on Oct 25, 2011
@author: mmornati
'''
from webui.servers.models import Server
from guardian.shortcuts import get_objects_for_user
import logging
from webui.platforms.oc4j.utils import extract_appli_info, check_contains,\
extract_appli_details
from webui.platforms.abstracts import Application
from webui.platforms.oc4j import settings
from webui.platforms.platforms import platforms
from webui.servers import utils
logger = logging.getLogger(__name__)
class OC4JApplication(Application):
def getApplications(self, user):
servers = utils.extract_user_servers(user)
        # Retrieve the application list from every server controlled by kermit
applications = []
if servers:
for server in servers:
environment = self.extract_environment_level(server)
appli = extract_appli_info(server.hostname, environment)
if appli:
for app in appli:
extracted = check_contains(applications, app)
if extracted:
extracted["deploy"] = extracted["deploy"] + 1
extracted["servers"].append(app["servers"])
else:
applications.append(app)
return applications
def getApplicationsPath(self, user, server_path):
servers = utils.extract_user_servers_in_path(user, server_path)
        # Retrieve the application list from every server controlled by kermit
applications = []
if servers:
for server in servers:
environment = self.extract_environment_level(server)
appli = extract_appli_info(server.hostname, environment)
if appli:
for app in appli:
extracted = check_contains(applications, app)
if extracted:
extracted["deploy"] = extracted["deploy"] + 1
extracted["servers"].append(app["servers"])
else:
applications.append(app)
return applications
def getAppliInfo(self, user, appname):
servers = utils.extract_user_servers(user)
        # Retrieve the application list from every server controlled by kermit
applications = []
if servers:
for server in servers:
environment = self.extract_environment_level(server)
appli = extract_appli_details(server.hostname, environment, appname)
if appli:
applications.extend(appli)
return applications
platforms.register(OC4JApplication, settings.PLATFORM_NAME) | gpl-3.0 | 2,970,411,000,821,777,400 | 38.652174 | 84 | 0.590859 | false |
GovCERT-CZ/dionaea | modules/python/util/logsql2postgres.py | 3 | 10245 | #!/opt/dionaea/bin/python3
# sudo su postgres
# createdb --owner=xmpp logsql
# psql -U xmpp logsql < modules/python/util/xmpp/pg_schema.sql
import sqlite3
import postgresql.driver as pg_driver
import optparse
def copy(name, lite, pg, src, dst):
print("[+] {0}".format(name))
pg.execute("DELETE FROM {0}".format(dst['table']))
offset = 0
limit = 10000
insert = pg.prepare(dst['query'])
while True:
result = lite.execute(src['query'].format(limit, offset))
        result = result.fetchall()
        r = len(result)
insert.load_rows(result)
# print("{0} {1} {2}".format(offset, limit, r))
if r != limit:
# update the sequence if we inserted rows
if offset + r != 0:
pg.execute(
"SELECT setval('{0}',{1})".format(dst['seq'], offset + r))
break
offset += limit
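# Example invocation (hypothetical paths and credentials):
#
#   python logsql2postgres.py -s localhost:5432 -d logsql -u xmpp -p secret \
#       -f /opt/dionaea/var/dionaea/logsql.sqlite connections downloads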
cando = {
'connections' : ({
# FIXME postgres does not know connection_type pending
# connection_type is an enum, so this may get messy
'query' : """SELECT
connection,
connection_type,
connection_transport,
datetime(connection_timestamp, 'unixepoch') || ' UTC' AS connection_timestamp,
connection_parent,
connection_root,
ifnull(nullif(local_host,''),'0.0.0.0'),
local_port,
ifnull(nullif(remote_host,''),'0.0.0.0'),
remote_port,
connection_protocol,
remote_hostname FROM connections WHERE connection_type != 'pending' LIMIT {:d} OFFSET {:d} \n"""
},
{
'table' : 'dionaea.connections',
'seq' : "dionaea.connections_connection_seq",
'query' : """INSERT INTO dionaea.connections
(connection,
connection_type,
connection_transport,
connection_timestamp,
connection_parent,
connection_root,
local_host,
local_port,
remote_host,
remote_port,
connection_protocol,
remote_hostname)
VALUES
($1,$2,$3,$4::text::timestamp,$5,$6,$7::text::inet,$8,$9::text::inet,$10,$11,$12)""",
}),
'dcerpcbinds': ({
'query' : """SELECT
dcerpcbind,
connection,
dcerpcbind_uuid,
dcerpcbind_transfersyntax FROM dcerpcbinds LIMIT {:d} OFFSET {:d} \n"""
},
{
'table' : 'dionaea.dcerpcbinds',
'seq' : "dionaea.dcerpcbinds_dcerpcbind_seq",
'query' : """INSERT INTO dionaea.dcerpcbinds
(dcerpcbind,
connection,
dcerpcbind_uuid,
dcerpcbind_transfersyntax)
VALUES
($1,$2,$3,$4)""",
}),
'dcerpcrequests' : ({
'query' : """SELECT
dcerpcrequest,
connection,
dcerpcrequest_uuid,
dcerpcrequest_opnum FROM dcerpcrequests LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.dcerpcrequests',
'seq' : "dionaea.dcerpcrequests_dcerpcrequest_seq",
'query' : """INSERT INTO dionaea.dcerpcrequests
(dcerpcrequest,
connection,
dcerpcrequest_uuid,
dcerpcrequest_opnum)
VALUES
($1,$2,$3,$4)""",
}),
'dcerpcservices' : ({
'query' : """SELECT
dcerpcservice,
dcerpcservice_uuid,
dcerpcservice_name FROM dcerpcservices LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.dcerpcservices',
'seq' : "dionaea.dcerpcservices_dcerpcservice_seq",
'query' : """INSERT INTO dionaea.dcerpcservices
(dcerpcservice,
dcerpcservice_uuid,
dcerpcservice_name)
VALUES
($1,$2,$3)""",
}),
'dcerpcserviceops' : ({
'query' : """SELECT
dcerpcserviceop,
dcerpcservice,
dcerpcserviceop_name,
dcerpcserviceop_opnum,
dcerpcserviceop_vuln
FROM dcerpcserviceops LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.dcerpcserviceops',
'seq' : "dionaea.dcerpcserviceops_dcerpcserviceop_seq",
'query' : """INSERT INTO dionaea.dcerpcserviceops
(dcerpcserviceop,
dcerpcservice,
dcerpcserviceop_name,
dcerpcserviceop_opnum,
dcerpcserviceop_vuln)
VALUES
($1,$2,$3,$4,$5)""",
}),
'downloads' : ({
'query' : """SELECT
download,
connection,
download_md5_hash,
download_url FROM downloads LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.downloads',
'seq' : "dionaea.dcerpcrequests_dcerpcrequest_seq",
'query' : """INSERT INTO dionaea.downloads
(download,
connection,
download_md5_hash,
download_url)
VALUES
($1,$2,$3,$4)""",
}),
'emu_profiles' : ({
'query' : """SELECT
emu_profile,
connection,
emu_profile_json FROM emu_profiles LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.emu_profiles',
'seq' : "dionaea.emu_profiles_emu_profile_seq",
'query' : """INSERT INTO dionaea.emu_profiles
(emu_profile,
connection,
emu_profile_json)
VALUES
($1,$2,$3)""",
}),
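    # NOTE: "emu_serivce" in the SELECT below is (sic): it mirrors the
    # misspelled primary-key column of dionaea's sqlite logsql schema, so it
    # must not be "corrected" here.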
'emu_services' : ({
'query' : """SELECT
emu_serivce,
connection,
emu_service_url FROM emu_services LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.emu_services',
'seq' : "dionaea.emu_services_emu_service_seq",
'query' : """INSERT INTO dionaea.emu_services
(emu_service,
connection,
emu_service_url)
VALUES
($1,$2,$3)""",
}),
'offers' : ({
'query' : """SELECT
offer,
connection,
offer_url FROM offers LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.offers',
'seq' : "dionaea.offers_offer_seq",
'query' : """INSERT INTO dionaea.offers
(offer,
connection,
offer_url)
VALUES
($1,$2,$3)""",
}),
'p0fs' : (
{ 'query' : """SELECT
p0f,
connection,
p0f_genre,
p0f_link,
p0f_detail,
p0f_uptime,
p0f_tos,
p0f_dist,
p0f_nat,
p0f_fw FROM p0fs LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.p0fs',
'seq' : "dionaea.p0fs_p0f_seq",
'query' : """INSERT INTO dionaea.p0fs
( p0f,
connection,
p0f_genre,
p0f_link,
p0f_detail,
p0f_uptime,
p0f_tos,
p0f_dist,
p0f_nat,
p0f_fw)
VALUES
($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)""",
}),
'virustotals': (
{ 'query' : """SELECT
virustotal,
virustotal_md5_hash,
datetime(virustotal_timestamp, 'unixepoch') || ' UTC' AS virustotal_timestamp,
virustotal_permalink
FROM virustotals LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.virustotals',
'seq' : "dionaea.virustotals_virustotal_seq",
'query' : """INSERT INTO dionaea.virustotals
(
virustotal,
virustotal_md5_hash,
virustotal_timestamp,
virustotal_permalink
)
VALUES
($1,$2,$3::text::timestamptz,$4)""",
}),
'virustotalscans': (
{ 'query' : """SELECT
virustotalscan,
virustotal,
virustotalscan_scanner,
nullif(virustotalscan_result,'')
FROM virustotalscans LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.virustotalscans',
'seq' : "dionaea.virustotalscans_virustotalscan_seq",
'query' : """INSERT INTO dionaea.virustotalscans
(
virustotalscan,
virustotal,
virustotalscan_scanner,
virustotalscan_result
)
VALUES
($1,$2,$3,$4)""",
}),
# x
'mssql_fingerprints': (
{ 'query' : """SELECT
mssql_fingerprint,
connection,
mssql_fingerprint_hostname,
mssql_fingerprint_appname,
mssql_fingerprint_cltintname FROM mssql_fingerprints LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.mssql_fingerprints',
'seq' : "dionaea.mssql_fingerprints_mssql_fingerprint_seq",
'query' : """INSERT INTO dionaea.mssql_fingerprints
(
mssql_fingerprint,
connection,
mssql_fingerprint_hostname,
mssql_fingerprint_appname,
mssql_fingerprint_cltintname
)
VALUES
($1,$2,$3,$4,$5)""",
}),
'mssql_commands': (
{ 'query' : """SELECT
mssql_command,
connection,
mssql_command_status,
mssql_command_cmd FROM mssql_commands LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.mssql_commands',
'seq' : "dionaea.mssql_commands_mssql_command_seq",
'query' : """INSERT INTO dionaea.mssql_commands
(
mssql_command,
connection,
mssql_command_status,
mssql_command_cmd
)
VALUES
($1,$2,$3,$4)""",
}),
'logins': (
{ 'query' : """SELECT
login,
connection,
login_username,
login_password FROM logins LIMIT {:d} OFFSET {:d}"""
},
{ 'table' : 'dionaea.logins',
'seq' : "dionaea.logins_login_seq",
'query' : """INSERT INTO dionaea.logins
(
login,
connection,
login_username,
login_password
)
VALUES
($1,$2,$3,$4)""",
})
}
if __name__ == "__main__":
p = optparse.OptionParser()
p.add_option('-s', '--database-host', dest='database_host',
help='localhost:5432', type="string", action="store")
p.add_option('-d', '--database', dest='database',
help='for example xmpp', type="string", action="store")
p.add_option('-u', '--database-user', dest='database_user',
help='for example xmpp', type="string", action="store")
p.add_option('-p', '--database-password', dest='database_password',
help='the database users password', type="string", action="store")
p.add_option('-f', '--sqlite-file', dest='sqlite_file',
help='path to sqlite db', type="string", action="store")
(options, args) = p.parse_args()
    if len(args) == 0:
        print("usage: pass one or more tables to copy as args: {}".format(' '.join(cando.keys())))
        raise SystemExit(1)
db = {}
db['sqlite'] = {}
db['sqlite']['dbh'] = sqlite3.connect(options.sqlite_file)
db['sqlite']['cursor'] = db['sqlite']['dbh'].cursor()
db['pg'] = {}
db['pg']['dbh'] = pg_driver.connect(
user = options.database_user,
password = options.database_password,
database = options.database,
host = options.database_host,
port = 5432)
for i in args:
if i in cando:
copy(i,
db['sqlite']['cursor'],
db['pg']['dbh'],
cando[i][0],
cando[i][1])
# db['pg']['dbh'].commit()
| gpl-2.0 | 4,433,618,170,527,844,000 | 25.472868 | 98 | 0.561835 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/perf/page_sets/extension_profile_shared_state.py | 8 | 1202 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
from profile_creators import extension_profile_extender
from profile_creators import profile_generator
from telemetry.page import shared_page_state
class ExtensionProfileSharedState(shared_page_state.SharedPageState):
"""Shared state tied with extension profile.
Generates extension profile on initialization.
"""
def __init__(self, test, finder_options, story_set):
super(ExtensionProfileSharedState, self).__init__(
test, finder_options, story_set)
generator = profile_generator.ProfileGenerator(
extension_profile_extender.ExtensionProfileExtender,
'extension_profile')
self._out_dir = generator.Run(finder_options)
if self._out_dir:
finder_options.browser_options.profile_dir = self._out_dir
else:
finder_options.browser_options.dont_override_profile = True
def TearDownState(self):
"""Clean up generated profile directory."""
super(ExtensionProfileSharedState, self).TearDownState()
if self._out_dir:
shutil.rmtree(self._out_dir)
| mit | 7,033,498,901,055,003,000 | 34.352941 | 72 | 0.742097 | false |
namccart/gnuradio | grc/python/epy_block_io.py | 5 | 2648 |
import inspect
import collections
from gnuradio import gr
import pmt
TYPE_MAP = {
'complex64': 'complex', 'complex': 'complex',
'float32': 'float', 'float': 'float',
'int32': 'int', 'uint32': 'int',
'int16': 'short', 'uint16': 'short',
'int8': 'byte', 'uint8': 'byte',
}
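# e.g. a block declaring in_sig=(numpy.float32,) is mapped to a GRC port of
# type 'float'; dtypes missing from TYPE_MAP make _ports() below raise a
# ValueError.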
BlockIO = collections.namedtuple('BlockIO', 'name cls params sinks sources doc')
def _ports(sigs, msgs):
ports = list()
for i, dtype in enumerate(sigs):
port_type = TYPE_MAP.get(dtype.name, None)
if not port_type:
raise ValueError("Can't map {0:!r} to GRC port type".format(dtype))
ports.append((str(i), port_type))
for msg_key in msgs:
if msg_key == 'system':
continue
ports.append((msg_key, 'message'))
return ports
def _blk_class(source_code):
ns = {}
try:
exec source_code in ns
except Exception as e:
raise ValueError("Can't interpret source code: " + str(e))
for var in ns.itervalues():
        if inspect.isclass(var) and issubclass(var, gr.gateway.gateway_block):
return var
raise ValueError('No python block class found in code')
def extract(cls):
if not inspect.isclass(cls):
cls = _blk_class(cls)
spec = inspect.getargspec(cls.__init__)
defaults = map(repr, spec.defaults or ())
doc = cls.__doc__ or cls.__init__.__doc__ or ''
cls_name = cls.__name__
if len(defaults) + 1 != len(spec.args):
raise ValueError("Need all __init__ arguments to have default values")
try:
instance = cls()
except Exception as e:
raise RuntimeError("Can't create an instance of your block: " + str(e))
name = instance.name()
params = list(zip(spec.args[1:], defaults))
sinks = _ports(instance.in_sig(),
pmt.to_python(instance.message_ports_in()))
sources = _ports(instance.out_sig(),
pmt.to_python(instance.message_ports_out()))
return BlockIO(name, cls_name, params, sinks, sources, doc)
if __name__ == '__main__':
blk_code = """
import numpy as np
from gnuradio import gr
import pmt
class blk(gr.sync_block):
def __init__(self, param1=None, param2=None):
"Test Docu"
gr.sync_block.__init__(
self,
name='Embedded Python Block',
in_sig = (np.float32,),
out_sig = (np.float32,np.complex64,),
)
self.message_port_register_in(pmt.intern('msg_in'))
self.message_port_register_out(pmt.intern('msg_out'))
def work(self, inputs_items, output_items):
return 10
"""
print extract(blk_code)
| gpl-3.0 | 3,098,903,623,530,642,000 | 26.873684 | 80 | 0.588746 | false |
garg10may/youtube-dl | youtube_dl/extractor/telemb.py | 177 | 2964 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import remove_start
class TeleMBIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html'
_TESTS = [
{
'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html',
'md5': 'f45ea69878516ba039835794e0f8f783',
'info_dict': {
'id': '13466',
'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-',
'ext': 'mp4',
'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages',
'description': 'md5:bc5225f47b17c309761c856ad4776265',
'thumbnail': 're:^http://.*\.(?:jpg|png)$',
}
},
{
# non-ASCII characters in download URL
'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html',
'md5': '6e9682736e5ccd4eab7f21e855350733',
'info_dict': {
'id': '13514',
'display_id': 'les-reportages-havre-incendie-mortel',
'ext': 'mp4',
'title': 'Havré - Incendie mortel - Les reportages',
'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a',
'thumbnail': 're:^http://.*\.(?:jpg|png)$',
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
formats = []
for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage):
fmt = {
'url': video_url,
'format_id': video_url.split(':')[0]
}
rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
if rtmp:
fmt.update({
'play_path': rtmp.group('playpath'),
'app': rtmp.group('app'),
'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf',
'page_url': 'http://www.telemb.be',
'preference': -1,
})
formats.append(fmt)
self._sort_formats(formats)
title = remove_start(self._og_search_title(webpage), 'TéléMB : ')
description = self._html_search_regex(
r'<meta property="og:description" content="(.+?)" />',
webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats,
}
| unlicense | 1,713,063,301,316,409,300 | 36.961538 | 112 | 0.498818 | false |
FireWalkerX/eyeOS-FOSS-V.2.0 | devtools/qooxdoo-sdk/tool/pylib/graph/classes/Digraph.py | 4 | 19100 | # Copyright (c) 2007-2009 Pedro Matiello <[email protected]>
# Nathan Davis <[email protected]>
# Zsolt Haraszti <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Digraph class
"""
# Imports
from graph import *
from graph.algorithms import filters
class digraph (object):
"""
Digraph class.
Digraphs are built of nodes and directed edges.
@sort: __init__, __getitem__, __iter__, __len__, __str__, add_edge, add_edge_attribute,
add_graph, add_node, add_node_attribute, add_nodes, add_spanning_tree, complete, degree,
del_edge, del_node, edges, get_edge_attributes, get_edge_label, get_edge_weight,
get_node_attributes, has_edge, has_node, incidents, inverse, neighbors, nodes, order,
set_edge_label, set_edge_weight, traversal, generate, read, write, accessibility,
breadth_first_search, cut_edges, cut_nodes, depth_first_search, heuristic_search,
minimal_spanning_tree, mutual_accessibility, shortest_path, topological_sorting
"""
def __init__(self):
"""
Initialize a digraph.
"""
self.node_neighbors = {} # Pairing: Node -> Neighbors
self.edge_properties = {} # Pairing: Edge -> (Label, Weight)
self.node_incidence = {} # Pairing: Node -> Incident nodes
self.node_attr = {} # Pairing: Node -> Attributes
self.edge_attr = {} # Pairing: Edge -> Attributes
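    # Illustrative usage (hypothetical node names):
    #
    #   gr = digraph()
    #   gr.add_nodes(['a', 'b', 'c'])
    #   gr.add_edge('a', 'b', wt=3)
    #   gr.add_edge('b', 'c')
    #   gr.neighbors('a')    # -> ['b']
    #   gr.incidents('b')    # -> ['a']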
def __str__(self):
"""
Return a string representing the digraph when requested by str() (or print).
@rtype: string
@return: String representing the graph.
"""
return "<graph object " + str(self.nodes()) + " " + str(self.edges()) + ">"
def __len__(self):
"""
Return the order of the digraph when requested by len().
@rtype: number
@return: Size of the graph.
"""
return len(self.node_neighbors)
def __iter__(self):
"""
Return a iterator passing through all nodes in the digraph.
@rtype: iterator
@return: Iterator passing through all nodes in the digraph.
"""
for each in self.node_neighbors.iterkeys():
yield each
def __getitem__(self, node):
"""
Return a iterator passing through all neighbors of the given node.
@rtype: iterator
@return: Iterator passing through all neighbors of the given node.
"""
for each in self.node_neighbors[node]:
yield each
def read(self, string, fmt='xml'):
"""
Read a graph from a string. Nodes and edges specified in the input will be added to the
current graph.
@type string: string
@param string: Input string specifying a graph.
@type fmt: string
@param fmt: Input format. Possible formats are:
1. 'xml' - XML (default)
"""
if (fmt == 'xml'):
readwrite.read_xml(self, string)
def write(self, fmt='xml'):
"""
Write the graph to a string. Depending of the output format, this string can be used by
read() to rebuild the graph.
@type fmt: string
@param fmt: Output format. Possible formats are:
1. 'xml' - XML (default)
2. 'dot' - DOT Language (for GraphViz)
3. 'dotwt' - DOT Language with weight information
@rtype: string
@return: String specifying the graph.
"""
if (fmt == 'xml'):
return readwrite.write_xml(self)
elif (fmt == 'dot'):
return readwrite.write_dot_digraph(self, False)
elif (fmt == 'dotwt'):
return readwrite.write_dot_digraph(self, True)
def generate(self, num_nodes, num_edges, weight_range=(1, 1)):
"""
Add nodes and random edges to the graph.
@type num_nodes: number
@param num_nodes: Number of nodes.
@type num_edges: number
@param num_edges: Number of edges.
@type weight_range: tuple
@param weight_range: tuple of two integers as lower and upper limits on randomly generated
weights (uniform distribution).
"""
generators.generate(self, num_nodes, num_edges, weight_range)
def nodes(self):
"""
Return node list.
@rtype: list
@return: Node list.
"""
return self.node_neighbors.keys()
def neighbors(self, node):
"""
Return all nodes that are directly accessible from given node.
@type node: node
@param node: Node identifier
@rtype: list
@return: List of nodes directly accessible from given node.
"""
return self.node_neighbors[node]
def incidents(self, node):
"""
Return all nodes that are incident to the given node.
@type node: node
@param node: Node identifier
@rtype: list
        @return: List of nodes that are incident to the given node.
"""
return self.node_incidence[node]
def edges(self):
"""
Return all edges in the graph.
@rtype: list
@return: List of all edges in the graph.
"""
return self.edge_properties.keys()
def has_node(self, node):
"""
Return whether the requested node exists.
@type node: node
@param node: Node identifier
@rtype: boolean
@return: Truth-value for node existence.
"""
return self.node_neighbors.has_key(node)
def add_node(self, node, attrs=[]):
"""
Add given node to the graph.
@attention: While nodes can be of any type, it's strongly recommended to use only numbers
and single-line strings as node identifiers if you intend to use write().
@type node: node
@param node: Node identifier.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value) tuples.
"""
if (node not in self.node_neighbors):
self.node_neighbors[node] = []
self.node_incidence[node] = []
self.node_attr[node] = attrs
def add_nodes(self, nodelist):
"""
Add given nodes to the graph.
@attention: While nodes can be of any type, it's strongly recommended to use only numbers
and single-line strings as node identifiers if you intend to use write().
@type nodelist: list
@param nodelist: List of nodes to be added to the graph.
"""
for each in nodelist:
self.add_node(each)
def add_edge(self, u, v, wt=1, label='', attrs=[]):
"""
        Add a directed edge (u,v) to the graph connecting node u to node v.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type wt: number
@param wt: Edge weight.
@type label: string
@param label: Edge label.
@type attrs: list
        @param attrs: List of edge attributes specified as (attribute, value) tuples.
"""
if (v not in self.node_neighbors[u]):
self.node_neighbors[u].append(v)
self.node_incidence[v].append(u)
self.edge_properties[(u, v)] = [label, wt]
self.edge_attr[(u, v)] = attrs
def del_node(self, node):
"""
Remove a node from the graph.
@type node: node
@param node: Node identifier.
"""
for each in list(self.incidents(node)):
self.del_edge(each, node)
for each in list(self.neighbors(node)):
self.del_edge(node, each)
del(self.node_neighbors[node])
del(self.node_incidence[node])
del(self.node_attr[node])
def del_edge(self, u, v):
"""
        Remove a directed edge (u, v) from the graph.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
"""
self.node_neighbors[u].remove(v)
self.node_incidence[v].remove(u)
del(self.edge_properties[(u,v)])
del(self.edge_attr[(u,v)])
def get_edge_weight(self, u, v):
"""
Get the weight of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: number
@return: Edge weight.
"""
return self.edge_properties[(u, v)][1]
def set_edge_weight(self, u, v, wt):
"""
Set the weight of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type wt: number
@param wt: Edge weight.
"""
self.edge_properties[(u, v)][1] = wt
def get_edge_label(self, u, v):
"""
Get the label of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: string
@return: Edge label
"""
return self.edge_properties[(u, v)][0]
def set_edge_label(self, u, v, label):
"""
Set the label of an edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type label: string
@param label: Edge label.
"""
self.edge_properties[(u, v)][0] = label
def add_node_attribute(self, node, attr):
"""
Add attribute to the given node.
@type node: node
@param node: Node identifier
@type attr: tuple
@param attr: Node attribute specified as a tuple in the form (attribute, value).
"""
self.node_attr[node] = self.node_attr[node] + [attr]
def get_node_attributes(self, node):
"""
Return the attributes of the given node.
@type node: node
@param node: Node identifier
@rtype: list
        @return: List of attributes specified as tuples in the form (attribute, value).
"""
return self.node_attr[node]
def add_edge_attribute(self, u, v, attr):
"""
Add attribute to the given edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@type attr: tuple
        @param attr: Edge attribute specified as a tuple in the form (attribute, value).
"""
self.edge_attr[(u,v)] = self.edge_attr[(u,v)] + [attr]
def get_edge_attributes(self, u, v):
"""
Return the attributes of the given edge.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: list
        @return: List of attributes specified as tuples in the form (attribute, value).
"""
return self.edge_attr[(u,v)]
def has_edge(self, u, v):
"""
Return whether an edge between nodes u and v exists.
@type u: node
@param u: One node.
@type v: node
@param v: Other node.
@rtype: boolean
@return: Truth-value for edge existence.
"""
return self.edge_properties.has_key((u,v))
def order(self, node):
"""
Return the order of the given node.
@rtype: number
@return: Order of the given node.
"""
return len(self.neighbors(node))
def degree(self, node):
"""
Return the degree of the given node.
@rtype: number
        @return: Degree (number of incoming edges) of the given node.
"""
return len(self.node_incidence[node])
def complete(self):
"""
Make the graph a complete graph.
@attention: This will modify the current graph.
"""
for each in self.nodes():
for other in self.nodes():
if (each != other):
self.add_edge(each, other)
def inverse(self):
"""
Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph.
"""
inv = digraph()
inv.add_nodes(self.nodes())
inv.complete()
for each in self.edges():
inv.del_edge(each[0], each[1])
return inv
def add_graph(self, graph):
"""
Add other graph to the graph.
@attention: Attributes and labels are not preserved.
@type graph: graph
@param graph: Graph
"""
self.add_nodes(graph.nodes())
for each_node in graph.nodes():
for each_edge in graph.neighbors(each_node):
self.add_edge(each_node, each_edge)
def add_spanning_tree(self, st):
"""
Add a spanning tree to the graph.
@type st: dictionary
@param st: Spanning tree.
"""
self.add_nodes(st.keys())
for each in st:
if (st[each] is not None):
self.add_edge(st[each], each)
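    # Shape of the expected spanning-tree dictionary (illustrative values):
    # ``{"a": None, "b": "a", "c": "b"}`` maps each node to its parent, so
    # add_spanning_tree() would add the edges ("a", "b") and ("b", "c").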
def traversal(self, node, order='pre'):
"""
Graph traversal iterator.
@type node: node
@param node: Node.
@type order: string
@param order: traversal ordering. Possible values are:
            1. 'pre' - Preordering (default)
            2. 'post' - Postordering
@rtype: iterator
@return: Traversal iterator.
"""
for each in traversal.traversal(self, node, order):
yield each
def depth_first_search(self, root=None, filter=filters.null()):
"""
        Depth-first search.
@type root: node
@param root: Optional root node (will explore only root's connected component)
@rtype: tuple
        @return: tuple containing a dictionary and two lists:
1. Generated spanning tree
2. Graph's preordering
3. Graph's postordering
"""
return searching.depth_first_search(self, root, filter)
def accessibility(self):
"""
Accessibility matrix (transitive closure).
@rtype: dictionary
@return: Accessibility information for each node.
"""
return accessibility.accessibility(self)
def breadth_first_search(self, root=None, filter=filters.null()):
"""
Breadth-first search.
@type root: node
@param root: Optional root node (will explore only root's connected component)
        @rtype: tuple
@return: A tuple containing a dictionary and a list.
1. Generated spanning tree
2. Graph's level-based ordering
"""
return searching.breadth_first_search(self, root, filter=filter)
def mutual_accessibility(self):
"""
Mutual-accessibility matrix (strongly connected components).
@rtype: list
@return: Mutual-accessibility information for each node.
"""
return accessibility.mutual_accessibility(self)
def topological_sorting(self):
"""
Topological sorting.
@attention: Topological sorting is meaningful only for directed acyclic graphs.
@rtype: list
@return: Topological sorting for the graph.
"""
return sorting.topological_sorting(self)
def minimal_spanning_tree(self, root=None):
"""
Minimal spanning tree.
@type root: node
@param root: Optional root node (will explore only root's connected component)
        @attention: Minimal spanning tree is meaningful only for weighted graphs.
        @rtype: dictionary
@return: Generated spanning tree.
"""
return minmax.minimal_spanning_tree(self, root)
def shortest_path(self, source):
"""
Return the shortest path distance between source node and all other nodes using Dijkstra's
algorithm.
@attention: All weights must be nonnegative.
@type source: node
@param source: Node from which to start the search.
@rtype: tuple
@return: A tuple containing two dictionaries, each keyed by target nodes.
1. Shortest path spanning tree
2. Shortest distance from given source to each target node
Inaccessible target nodes do not appear in either dictionary.
"""
return minmax.shortest_path(self, source)
def heuristic_search(self, start, goal, heuristic):
"""
A* search algorithm.
A set of heuristics is available under C{graph.heuristics}. User-created heuristics are
allowed too.
@type start: node
@param start: Start node
@type goal: node
@param goal: Goal node
@type heuristic: function
@param heuristic: Heuristic function
@rtype: list
@return: Optimized path from start to goal node
"""
return minmax.heuristic_search(self, start, goal, heuristic)
def cut_edges(self):
"""
Return the cut-edges of the given graph.
@rtype: list
@return: List of cut-edges.
"""
return accessibility.cut_edges(self)
def cut_nodes(self):
"""
Return the cut-nodes of the given graph.
@rtype: list
@return: List of cut-nodes.
"""
return accessibility.cut_nodes(self)
def find_cycle(self):
"""
Find a cycle in the digraph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@rtype: list
@return: List of nodes.
"""
return cycles.find_cycle(self, directed=True)
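# A minimal usage sketch (not part of the original module; assumes this
# class is importable as ``digraph``):
#
#     gr = digraph()
#     gr.add_nodes(["a", "b", "c"])
#     gr.add_edge("a", "b", wt=3)
#     gr.add_edge("b", "c", wt=5)
#     tree, dist = gr.shortest_path("a")
#     # dist -> {"a": 0, "b": 3, "c": 8}; tree maps each reachable node
#     # to its predecessor on the shortest path from "a".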
| agpl-3.0 | -8,219,105,318,877,260,000 | 26.601156 | 101 | 0.563194 | false |
geosolutions-it/wps-remote | src/wpsremote/xmpp_data/configs/myservice/code/test.py | 1 | 3686 | # (c) 2016 Open Source Geospatial Foundation - all rights reserved
# (c) 2014 - 2015 Centre for Maritime Research and Experimentation (CMRE)
# (c) 2013 - 2014 German Aerospace Center (DLR)
# This code is licensed under the GPL 2.0 license, available at the root
# application directory.
import subprocess
import logging.config
import logging
import argparse
import sys
import os
import uuid
import zipfile
import time
# constants
# id = os.urandom(10)
id = str(uuid.uuid4())
gdalContour = r'/usr/bin/gdal_contour'
dst = r'contour_'+id[:13]
src = '%s/../../../resource_dir/srtm_39_04/srtm_39_04_c.tif' % os.path.dirname(os.path.abspath(__file__))
cmd = '-a elev' # just for example!
interval = '-i'
class GDALTest(object):
def __init__(self, args):
self.args = args
self.create_logger("logger_test.properties")
self.logger.info("ProgressInfo:0.0%")
def run(self):
trg = '%s/../../../output/%s/%s.shp' % (os.path.dirname(os.path.abspath(__file__)), self.args.execution_id, dst)
# fullCmd = ' '.join([gdalContour, cmd, self.youCanQuoteMe(src), \
# self.youCanQuoteMe(dst), interval, self.args.interval])
fullCmd = ' '.join([gdalContour, cmd, src, trg, interval, self.args.interval])
self.logger.debug("Running command > " + fullCmd)
self.logger.info("going to sleep again...")
time.sleep(30) # Delays for 30 seconds. You can also use a float value.
proc = subprocess.Popen(fullCmd.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
for line in proc.stdout:
self.logger.info(line)
# call communicate to retrieve return code of subprocess
proc.communicate()
ret = proc.returncode
self.logger.info("...waking up and going to sleep again...")
time.sleep(30) # Delays for 30 seconds. You can also use a float value.
if (ret == 0):
# zipf = zipfile.ZipFile(self.args.workdir+'/contour.zip', 'w')
# self.zipdir(self.args.workdir+'/', zipf)
output_dir = '%s/../../../output/%s' % (os.path.dirname(os.path.abspath(__file__)), self.args.execution_id)
zipf = zipfile.ZipFile(output_dir+'/contour.zip', 'w')
self.zipdir(output_dir+'/', zipf)
zipf.close()
self.logger.info("ProgressInfo:100%")
else:
self.logger.critical("Error occurred during processing.")
return ret
# see note below
def youCanQuoteMe(self, item):
return "\"" + item + "\""
def zipdir(self, path, zip):
for root, dirs, files in os.walk(path):
files = [fi for fi in files if fi.startswith(dst)]
for file in files:
zip.write(os.path.join(root, file))
def create_logger(self, logger_config_file):
defaults = {}
logging.config.fileConfig(str(logger_config_file), defaults=defaults)
self.logger = logging.getLogger("main.create_logger")
self.logger.debug("Logger initialized with file " + str(logger_config_file))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interval", nargs='?', default="10", help="Elevation interval between contours.")
parser.add_argument("-w", "--workdir", nargs='?', default="", help="Remote process sandbox working directory.")
parser.add_argument("-e", "--execution_id", nargs='?', default="", help="Remote process Unique Execution Id.")
cmdargs = parser.parse_args()
gdalTest = GDALTest(cmdargs)
return_code = gdalTest.run()
sys.exit(return_code)
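# Example invocation (hypothetical argument values; assumes the gdal_contour
# binary and the bundled SRTM raster referenced above are present):
#
#     python test.py -i 25 -e 0001 -w /tmp/sandbox
#
# This writes contour_<uuid> shapefiles plus a contour.zip archive under
# ../../../output/0001/ relative to this script.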
| gpl-2.0 | -1,464,296,127,468,004,400 | 36.232323 | 120 | 0.622355 | false |
majdigital/bigworldgraph | backend/bwg/wikidata_mixins.py | 1 | 13085 | # -*- coding: utf-8 -*-
"""
This module provides two different ways to access Wikidata:
* Through the Wikimedia API with ``Pywikibot`` as a wrapper
* Through a scraper using ``BeautifulSoup4``
Currently, accessing the data via the API is faster than the scraper.
"""
# STD
import abc
import hashlib
import threading
# EXT
import pywikibot
from pywikibot.data import api
# PROJECT
from bwg.helpers import construct_dict_from_source
from bwg.serializing import retry_with_fallback
class RequestCache:
"""
    Special class used as a cache, so that requests don't have to be repeated if they were already made in the past.
"""
def __init__(self):
self.lock = threading.Lock()
self.cache = {}
self.requested = set()
self.number_of_requests = 0
self.number_of_avoided_requests = 0
def __contains__(self, item):
return item in self.requested
def __delitem__(self, key):
del self.cache[key]
self.requested.remove(key)
def __getitem__(self, key):
return self.cache[key]
def __setitem__(self, key, value):
self.cache[key] = value
self.requested.add(key)
def __enter__(self):
self.lock.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.lock.release()
def __len__(self):
return len(self.requested)
def request(self, key, request_func, *request_args, **request_kwargs):
"""
        Make a request, but look it up in the cache first to see whether it can be avoided.
:param key: Key that should be used to cache the request.
:type key: str, int
:param request_func: Function to do the request.
:type request_func: func
:param request_args: Arguments for request.
:type request_args: tuple
:param request_kwargs: Key word arguments for request.
:type request_kwargs: dict
"""
if key in self:
self.number_of_avoided_requests += 1
return self[key]
request_result = request_func(*request_args, **request_kwargs)
self.number_of_requests += 1
self[key] = request_result
return request_result
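# A minimal usage sketch for RequestCache (``fetch_entity`` and the key are
# hypothetical; repeated calls with the same key are served from the cache):
#
#     cache = RequestCache()
#     first = cache.request("Q42", fetch_entity, "Q42", language="en")
#     second = cache.request("Q42", fetch_entity, "Q42", language="en")
#     # fetch_entity ran only once: cache.number_of_requests == 1 and
#     # cache.number_of_avoided_requests == 1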
class AbstractWikidataMixin:
"""
Define the functions inheriting subclasses should implement.
"""
@abc.abstractmethod
def get_matches(self, name, language):
"""
Get matches for an entity's name on Wikidata.
:param name: Name of entity.
:type name: str
:param language: Abbreviation of target language.
:type language: str
:return: List of matches.
:rtype: list
"""
pass
@abc.abstractmethod
def get_entity(self, wikidata_id, language, relevant_properties, properties_implying_relations):
"""
Get Wikidata information about an entity based on its identifier.
:param wikidata_id: Wikidata ID of desired entity.
:type wikidata_id: str
:param language: Abbreviation of target language.
:type language: str
:param relevant_properties: Types of claims that should be included.
:type relevant_properties: list
        :param properties_implying_relations: Dict of property IDs for properties that are not mere characteristics, but
        imply other relations that should later be shown in the graph. The properties are the keys and the entity node
        class they're implying are the values.
        :type properties_implying_relations: dict
        :return: List of data about every sense of the entity (unambiguous entities will just have one sense).
:rtype: list
"""
pass
class WikidataAPIMixin(AbstractWikidataMixin):
"""
Access Wikidata information via Wikimedia's API.
"""
wikidata_site = pywikibot.Site("wikidata", "wikidata")
request_cache = RequestCache()
match_cache = RequestCache()
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_matches(self, name, language):
"""
Get matches for an entity's name on Wikidata.
:param name: Name of entity.
:type name: str
:param language: Abbreviation of target language.
:type language: str
:return: List of matches.
:rtype: list
"""
additional_request_parameters = {
"action": "wbsearchentities",
"language": language,
"type": "item",
"search": name
}
response = self._request(**additional_request_parameters)
if len(response["search"]) == 0:
return []
return [
construct_dict_from_source(
{
"uri": lambda source: source["concepturi"],
"id": lambda source: source["id"],
"description": lambda source: source["description"],
"label": lambda source: source["label"]
},
search_result
)
for search_result in response["search"]
]
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_entity(self, wikidata_id, language, relevant_properties, properties_implying_relations, recursively=True):
"""
Get Wikidata information about an entity based on its identifier.
:param wikidata_id: Wikidata ID of desired entity.
:type wikidata_id: str
:param language: Abbreviation of target language.
:type language: str
:param relevant_properties: Types of claims that should be included.
:type relevant_properties: list
:param properties_implying_relations: Dict of property IDs for properties that are not mere characteristics, but
imply other relations that should later be shown in the graph. The properties are the keys and the entity node
class they're implying are the values.
:type properties_implying_relations: dict
        :param recursively: Request data for friend-of-friend (fof) nodes recursively.
:type recursively: bool
:return: Wikidata entity as dictionary
:rtype: dict
"""
additional_request_parameters = {
"ids": wikidata_id
}
response = self._request(**additional_request_parameters)
if len(response["entities"]) == 0:
return {}
return [
construct_dict_from_source(
{
"aliases": lambda source: [alias_dict["value"] for alias_dict in source["aliases"][language]],
"description": lambda source: source["descriptions"][language]["value"],
"id": lambda source: source["id"],
"label": lambda source: source["labels"][language]["value"],
"modified": lambda source: source["modified"],
"claims": lambda source: self.resolve_claims(
source["claims"], language=language,
relevant_properties=relevant_properties,
properties_implying_relations=properties_implying_relations,
recursively=recursively
) if recursively else {}
},
entity
)
for id_, entity in response["entities"].items()
][0]
@retry_with_fallback(triggering_error=KeyError, language="en")
def resolve_claims(self, claims, language, relevant_properties, properties_implying_relations, recursively=True):
"""
Resolve the claims (~ claimed facts) about a wikidata entity.
:param claims: Dictionary with property ID as key and claim data as value.
:type claims: dict
:param language: Abbreviation of target language.
:type language: str
:param relevant_properties: Types of claims that should be included.
:type relevant_properties: list
        :param properties_implying_relations: Dict of property IDs for properties that are not mere characteristics, but
        imply other relations that should later be shown in the graph. The properties are the keys and the entity node
        class they're implying are the values.
        :type properties_implying_relations: dict
        :param recursively: Request data for friend-of-friend (fof) nodes recursively.
        :type recursively: bool
        :return: List of data about every sense of the entity (unambiguous entities will just have one sense).
:rtype: list
"""
properties = {}
for property_id, claim in claims.items():
if property_id in relevant_properties:
property_name = self.get_property_name(property_id, language=language)
if property_id != "P18":
target = self.get_entity_name(claim[0]["mainsnak"]["datavalue"]["value"]["id"], language=language)
else:
# Handle images differently
target = self.get_image_url(claim[0]["mainsnak"]["datavalue"]["value"])
property_data = {
"target": target,
"implies_relation": property_id in properties_implying_relations,
"entity_class": properties_implying_relations.get(property_id, None),
}
if property_id in properties_implying_relations:
target_senses = self.match_cache.request(
target, self.get_matches, target, language=language
)
property_data["target_data"] = [
self.request_cache.request(
target_sense["id"], self.get_entity,
target_sense["id"], language=language,
relevant_properties=relevant_properties,
properties_implying_relations=properties_implying_relations,
recursively=False
)
for target_sense in target_senses
]
else:
property_data["target_data"] = {}
properties[property_name] = property_data
return properties
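    # Shape of the mapping returned by resolve_claims (symbolic, abridged;
    # the property name and entity class shown are illustrative only):
    #
    #     {"occupation": {"target": "politician",
    #                     "implies_relation": True,
    #                     "entity_class": "Person",
    #                     "target_data": [<one entity dict per matched sense>]}}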
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_property_name(self, property_id, language):
"""
Get the name of a wikidata property.
:param property_id: Wikidata property ID.
:type property_id: str
:param language: Abbreviation of target language.
:type language: str
:return: Name of property.
:rtype: str
"""
additional_request_parameters = {
"ids": property_id
}
response = self._request(**additional_request_parameters)
return [
entity["labels"][language]["value"]
for id_, entity in response["entities"].items()
][0]
@retry_with_fallback(triggering_error=KeyError, language="en")
def get_entity_name(self, entity_id, language):
"""
Get the name of a wikidata entity.
:param entity_id: Wikidata property ID.
:type entity_id: str
:param language: Abbreviation of target language.
:type language: str
:return: Name of entity.
:rtype: str
"""
additional_request_parameters = {
"ids": entity_id
}
response = self._request(**additional_request_parameters)
return [
entity["labels"][language]["value"]
for id_, entity in response["entities"].items()
][0]
def _request(self, **additional_request_parameters):
"""
Send a request to the API.
:param additional_request_parameters: Additional parameters for the request that is being sent to the API.
:type additional_request_parameters: dict
:return: Response following the request.
:rtype: dict
"""
request_parameters = {
"site": self.wikidata_site,
"action": 'wbgetentities',
"format": 'json',
"use_get": True,
"throttle": False,
"max_retries": 30,
"maxlag": 20,
"retry_wait": 20
}
request_parameters.update(additional_request_parameters)
request = api.Request(**request_parameters)
return request.submit()
@staticmethod
def get_image_url(image_name):
"""
Generate Wikidata URL for a Wikidata image.
:param image_name: Name of image as given by the API request.
:type image_name: str
:return: Link to image.
:rtype: str
"""
# See http://stackoverflow.com/questions/34393884/how-to-get-image-url-property-from-wikidata-item-by-api
# for explanation
image_name = image_name.replace(" ", "_")
md5_sum = hashlib.md5(image_name.encode('utf-8')).hexdigest()
return "https://upload.wikimedia.org/wikipedia/commons/{a}/{ab}/{image_name}".format(
image_name=image_name, a=md5_sum[0], ab=md5_sum[0:2]
)
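# Illustration of the Wikimedia Commons path scheme above (symbolic; no real
# hash is computed here): if md5("My_photo.jpg") began with "ab", the
# resulting URL would be
# https://upload.wikimedia.org/wikipedia/commons/a/ab/My_photo.jpg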
| mit | -788,782,512,497,445,600 | 35.046832 | 120 | 0.584868 | false |
pong3489/TEST_Mission | Lib/site-packages/numpy/lib/utils.py | 54 | 36175 | import os
import sys
import types
import re
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import product, ndarray, ufunc
__all__ = ['issubclass_', 'get_numpy_include', 'issubsctype', 'issubdtype',
'deprecate', 'deprecate_with_doc', 'get_numarray_include',
'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds',
'may_share_memory', 'safe_eval']
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
def get_numarray_include(type=None):
"""
Return the directory that contains the numarray \\*.h header files.
Extension modules that need to compile against numarray should use this
function to locate the appropriate include directory.
Parameters
----------
type : any, optional
If `type` is not None, the location of the NumPy headers is returned
as well.
Returns
-------
dirs : str or list of str
If `type` is None, `dirs` is a string containing the path to the
numarray headers.
If `type` is not None, `dirs` is a list of strings with first the
path(s) to the numarray headers, followed by the path to the NumPy
headers.
Notes
-----
Useful when using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_numarray_include()])
...
"""
from numpy.numarray import get_numarray_include_dirs
include_dirs = get_numarray_include_dirs()
if type is None:
return include_dirs[0]
else:
return include_dirs + [get_include()]
if sys.version_info < (2, 4):
# Can't set __name__ in 2.3
import new
def _set_function_name(func, name):
func = new.function(func.func_code, func.func_globals,
name, func.func_defaults, func.func_closure)
return func
else:
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
        Decorator call. Refer to ``deprecate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
import warnings
if old_name is None:
try:
old_name = func.func_name
except AttributeError:
old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in which
case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case
the deprecation message is that `old_name` is deprecated. If given,
the deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the docstring
after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation Warning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
/usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
DeprecationWarning: uint32 is deprecated
warnings.warn(str1, DeprecationWarning)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
# backward compatibility -- can be removed
# after next release
if 'newname' in kwargs:
kwargs['new_name'] = kwargs.pop('newname')
if 'oldname' in kwargs:
kwargs['old_name'] = kwargs.pop('oldname')
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
get_numpy_include = deprecate(get_include, 'get_numpy_include', 'get_include')
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second integer is
just past the last byte of the array. If `a` is not contiguous it
will not use every byte between the (`low`, `high`) values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
nd_a = len(ashape)
bytes_a = int(ai['typestr'][2:])
a_low = a_high = a_data
if astrides is None: # contiguous case
a_high += product(ashape, dtype=int)*bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
def may_share_memory(a, b):
"""
Determine if two arrays can share memory
The memory-bounds of a and b are computed. If they overlap then
this function returns True. Otherwise, it returns False.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Parameters
----------
a, b : ndarray
Returns
-------
out : bool
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
"""
a_low, a_high = byte_bounds(a)
b_low, b_high = byte_bounds(b)
if b_low >= a_high or a_low >= b_high:
return False
return True
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the Numpy arrays in the given dictionary.
If there is no dictionary passed in or `vardict` is None then returns
Numpy arrays in the globals() dictionary (all Numpy arrays in the
namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays present
in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 40 int32
b 20 160 float64
Upper bound on total bytes = 200
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
y 3 24 float64
x 2 16 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name],ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original=0
else:
cache[idv] = name
namestr = name
original=1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10,maxname)
sp2 = max(10,maxshape)
sp3 = max(10,maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print prval + "\n" + "="*(len(prval)+5) + "\n"
for k in range(len(sta)):
val = sta[k]
print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3])
print "\nUpper bound on total bytes = %d" % totalbytes
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of
# width characters. End lines on a comma and begin argument list
# indented with the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while 1:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x],types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
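# Rough shape of the values returned above (abridged; actual contents depend
# on the installed package):
#
#     thedict, dictlist = _makenamedict('numpy')
#     # dictlist ~ ['numpy', 'numpy.core', 'numpy.lib', ...]
#     # thedict  ~ {'numpy': <module dict>, 'numpy.core': <module dict>, ...}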
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects.
If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is ``stdout``.
The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent to
``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc, inspect
if hasattr(object,'_ppimport_importer') or \
hasattr(object, '_ppimport_module'):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
import numpy.numarray as nn
nn.info(object, output=output, numpy=1)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print >> output, "\n *** Repeat reference found in %s *** " % namestr
else:
objlist.append(id(obj))
print >> output, " *** Found in %s ***" % namestr
info(obj)
print >> output, "-"*maxwidth
numfound += 1
except KeyError:
pass
if numfound == 0:
print >> output, "Help for %s not found." % object
else:
print >> output, "\n *** Total of %d references found. ***" % numfound
elif inspect.isfunction(object):
name = object.func_name
arguments = inspect.formatargspec(*inspect.getargspec(object))
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
except:
pass
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object,'__init__'):
print >> output, inspect.getdoc(object.__init__)
else:
print >> output, inspect.getdoc(object)
methods = pydoc.allmethods(object)
if methods != []:
print >> output, "\n\nMethods:\n"
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
print >> output, " %s -- %s" % (meth, methstr)
elif type(object) is types.InstanceType: ## check for __call__ method
print >> output, "Instance of class: ", object.__class__.__name__
print >> output
if hasattr(object, '__call__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object,'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc = inspect.getdoc(object.__call__)
if doc is not None:
print >> output, inspect.getdoc(object.__call__)
print >> output, inspect.getdoc(object)
else:
print >> output, inspect.getdoc(object)
elif inspect.ismethod(object):
name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif hasattr(object, '__doc__'):
print >> output, inspect.getdoc(object)
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module, ...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print >> output, "In file: %s\n" % inspect.getsourcefile(object)
print >> output, inspect.getsource(object)
except:
print >> output, "Not available for this object."
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
    A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats: return
for name, (docstring, kind, index) in cache.iteritems():
if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
doc = docstring.lower()
for w in whats:
if w not in doc:
ok = False
break
if ok:
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
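    # Worked example of the scoring above (assuming both query words occur
    # in the first three docstring lines): for the query
    # "binary representation", the name "numpy.binary_repr" gets
    # 2*200 (keywords in the leading doc lines) + 30 ("binary" in the name)
    # - 17*5 (name length) + 1000 (function kind) - 10 (one namespace dot),
    # minus a small index penalty.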
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print "\n".join(help_text)
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
    regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
from cStringIO import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen: continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if os.path.isfile(this_py) and mod_path.endswith('.py'):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
# Catch SystemExit, too
base_exc = BaseException
except NameError:
# Python 2.4 doesn't have BaseException
base_exc = Exception
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except base_exc:
continue
for n, v in _getmembers(item):
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
doc = inspect.getdoc(item)
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except AttributeError:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
"""
Object to evaluate constant string expressions.
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
For an example of usage, see `safe_eval`.
See Also
--------
safe_eval
"""
if sys.version_info[0] < 3:
def visit(self, node, **kw):
cls = node.__class__
meth = getattr(self,'visit'+cls.__name__,self.default)
return meth(node, **kw)
def default(self, node, **kw):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
def visitConst(self, node, **kw):
return node.value
def visitDict(self, node,**kw):
return dict([(self.visit(k),self.visit(v)) for k,v in node.items])
def visitTuple(self, node, **kw):
return tuple([self.visit(i) for i in node.nodes])
def visitList(self, node, **kw):
return [self.visit(i) for i in node.nodes]
def visitUnaryAdd(self, node, **kw):
return +self.visit(node.getChildNodes()[0])
def visitUnarySub(self, node, **kw):
return -self.visit(node.getChildNodes()[0])
def visitName(self, node, **kw):
if node.name == 'False':
return False
elif node.name == 'True':
return True
elif node.name == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.name)
else:
def visit(self, node):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node)
def default(self, node):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node):
return self.visit(node.body)
def visitNum(self, node):
return node.n
def visitStr(self, node):
return node.s
def visitBytes(self, node):
return node.s
def visitDict(self, node,**kw):
return dict([(self.visit(k), self.visit(v))
for k, v in zip(node.keys, node.values)])
def visitTuple(self, node):
return tuple([self.visit(i) for i in node.elts])
def visitList(self, node):
return [self.visit(i) for i in node.elts]
def visitUnaryOp(self, node):
import ast
if isinstance(node.op, ast.UAdd):
return +self.visit(node.operand)
elif isinstance(node.op, ast.USub):
return -self.visit(node.operand)
else:
raise SyntaxError("Unknown unary op: %r" % node.op)
def visitName(self, node):
if node.id == 'False':
return False
elif node.id == 'True':
return True
elif node.id == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.id)
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains non-literal
code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
"""
# Local import to speed up numpy's import time.
try:
import compiler
except ImportError:
import ast as compiler
walker = SafeEval()
try:
ast = compiler.parse(source, mode="eval")
except SyntaxError, err:
raise
try:
return walker.visit(ast)
except SyntaxError, err:
raise
#-----------------------------------------------------------------------------
| gpl-3.0 | -6,702,397,263,208,574,000 | 30.566318 | 95 | 0.530477 | false |
errx/django | django/conf/locale/sr/formats.py | 394 | 2011 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
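# Illustration of how one of the datetime input formats above parses a value
# (hypothetical date):
#
#     >>> from datetime import datetime
#     >>> datetime.strptime('25.10.2006. 14:30', '%d.%m.%Y. %H:%M')
#     datetime.datetime(2006, 10, 25, 14, 30)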
| bsd-3-clause | 6,677,947,519,616,462,000 | 42.717391 | 77 | 0.46544 | false |
wolverineav/neutron | neutron/services/metering/drivers/noop/noop_driver.py | 53 | 1594 | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers
from neutron.services.metering.drivers import abstract_driver
class NoopMeteringDriver(abstract_driver.MeteringAbstractDriver):
@log_helpers.log_method_call
def update_routers(self, context, routers):
pass
@log_helpers.log_method_call
def remove_router(self, context, router_id):
pass
@log_helpers.log_method_call
def update_metering_label_rules(self, context, routers):
pass
@log_helpers.log_method_call
def add_metering_label_rule(self, context, routers):
pass
@log_helpers.log_method_call
def remove_metering_label_rule(self, context, routers):
pass
@log_helpers.log_method_call
def add_metering_label(self, context, routers):
pass
@log_helpers.log_method_call
def remove_metering_label(self, context, routers):
pass
@log_helpers.log_method_call
def get_traffic_counters(self, context, routers):
pass
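# This no-op driver is typically selected through the metering agent's
# configuration; the option shown below is an assumption about the
# deployment, not something enforced by this module:
#
#     [DEFAULT]
#     driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver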
| apache-2.0 | -7,643,003,988,865,234,000 | 29.653846 | 75 | 0.715182 | false |
Nicolou/grrSallesRepetition | fckeditor/editor/filemanager/connectors/py/fckcommands.py | 14 | 6293 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
def getFolders(self, resourceType, currentFolder):
"""
		Purpose: command to receive a list of folders
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
s = """<Folders>""" # Open the folders node
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
s += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
s += """</Folders>""" # Close the folders node
return s
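# Shape of the XML fragment produced by getFolders (illustrative names):
#
#     <Folders><Folder name="images" /><Folder name="docs" /></Folders>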
class GetFoldersAndFilesCommandMixin (object):
def getFoldersAndFiles(self, resourceType, currentFolder):
"""
		Purpose: command to receive a list of folders and files
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
# Open the folders / files node
folders = """<Folders>"""
files = """<Files>"""
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
folders += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
elif os.path.isfile(someObjectPath):
size = os.path.getsize(someObjectPath)
files += """<File name="%s" size="%s" />""" % (
convertToXmlAttribute(someObject),
size
)
# Close the folders / files node
folders += """</Folders>"""
files += """</Files>"""
return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg = ''
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning: mind the encodings!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create the whole hierarchy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath, mode=permissions)
os.umask( oldumask )
class UploadFileCommandMixin (object):
def uploadFile(self, resourceType, currentFolder):
"""
Purpose: command to upload files to server (same as FileUpload)
"""
errorNo = 0
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileName = sanitizeFileName( newFileName )
newFileNameOnly = removeExtension(newFileName)
newFileExtension = getExtension(newFileName).lower()
allowedExtensions = Config.AllowedExtensions[resourceType]
deniedExtensions = Config.DeniedExtensions[resourceType]
if (allowedExtensions):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
i = 0
while (True):
newFilePath = os.path.join (currentFolderPath,newFileName)
if os.path.exists(newFilePath):
i += 1
newFileName = "%s(%04d).%s" % (
newFileNameOnly, i, newFileExtension
)
errorNo= 201 # file renamed
else:
# Read file contents and write to the desired path (similar to php's move_uploaded_file)
fout = file(newFilePath, 'wb')
while (True):
chunk = newFile.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
if os.path.exists ( newFilePath ):
doChmod = False
try:
doChmod = Config.ChmodOnUpload
permissions = Config.ChmodOnUpload
except AttributeError: #ChmodOnUpload undefined
doChmod = True
permissions = 0755
if ( doChmod ):
oldumask = os.umask(0)
os.chmod( newFilePath, permissions )
os.umask( oldumask )
newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
return self.sendUploadResults( errorNo , newFileUrl, newFileName )
else:
return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
else:
return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
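# Note on the extension checks above: a non-empty AllowedExtensions list
# acts as a whitelist and takes precedence; otherwise DeniedExtensions acts
# as a blacklist; if both are empty, every extension is accepted.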
| gpl-2.0 | -7,170,777,282,900,533,000 | 30.782828 | 101 | 0.690291 | false |
codeforsanjose/trash-pickup-portal | data/env/lib/python2.7/site-packages/setuptools/extension.py | 229 | 1649 | import sys
import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from .dist import _get_unpatched
from . import msvc9_support
_Extension = _get_unpatched(distutils.core.Extension)
msvc9_support.patch_for_specialized_compiler()
def _have_cython():
"""
Return True if Cython can be imported.
"""
cython_impl = 'Cython.Distutils.build_ext'
try:
# from (cython_impl) import build_ext
__import__(cython_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
# for compatibility
have_pyrex = _have_cython
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
def _convert_pyx_sources_to_lang(self):
"""
Replace sources with .pyx extensions to sources with the target
language extension. This mechanism allows language authors to supply
pre-converted sources but to prefer the .pyx sources.
"""
if _have_cython():
# the build has Cython, so allow it to compile the .pyx files
return
lang = self.language or ''
target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
sub = functools.partial(re.sub, r'\.pyx$', target_ext)
self.sources = list(map(sub, self.sources))
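# Illustrative sketch (not part of setuptools): demonstrates the fallback
# performed by _convert_pyx_sources_to_lang when Cython is unavailable.
def _example_pyx_fallback():
    """Return the sources after substitution; gives ['pkg/fast.cpp'] for a
    C++ extension whenever Cython cannot be imported."""
    ext = Extension('pkg.fast', sources=['pkg/fast.pyx'], language='c++')
    ext._convert_pyx_sources_to_lang()
    return ext.sources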
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = Extension
| mit | 4,587,230,672,972,920,300 | 28.981818 | 76 | 0.669497 | false |
SEMCOG/synthpop | synthpop/test/test_categorizer.py | 4 | 3418 | import pytest
import numpy as np
from ..census_helpers import Census
from .. import categorizer as cat
@pytest.fixture
def c():
return Census("827402c2958dcf515e4480b7b2bb93d1025f9389")
@pytest.fixture
def acs_data(c):
population = ['B01001_001E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns
df = c.block_group_query(all_columns, "06", "075", tract="030600")
return df
@pytest.fixture
def pums_data(c):
return c.download_population_pums("06", "07506")
def test_categorize(acs_data, pums_data):
p_acs_cat = cat.categorize(acs_data, {
("population", "total"): "B01001_001E",
("age", "19 and under"): "B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E",
("age", "20 to 35"): "B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E",
("age", "35 to 60"): "B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E",
("age", "above 60"): "B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E",
("race", "white"): "B02001_002E",
("race", "black"): "B02001_003E",
("race", "asian"): "B02001_005E",
("race", "other"): "B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E",
("sex", "male"): "B01001_002E",
("sex", "female"): "B01001_026E"
}, index_cols=['NAME'])
assert len(p_acs_cat) == 3
assert len(p_acs_cat.columns) == 11
assert len(p_acs_cat.columns.names) == 2
assert p_acs_cat.columns[0][0] == "age"
assert np.all(cat.sum_accross_category(p_acs_cat) < 2)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
pums_data, jd_persons = cat.joint_distribution(
pums_data,
cat.category_combinations(p_acs_cat.columns),
{"age": age_cat, "race": race_cat, "sex": sex_cat}
)
| bsd-3-clause | 5,728,817,937,670,630,000 | 35.361702 | 77 | 0.499415 | false |
ginkgobioworks/edge | src/edge/models/genome_updater.py | 1 | 2960 | from contextlib import contextmanager
from edge.models.fragment import Fragment
class Genome_Updater(object):
"""
Mixin with helpers for updating genome.
"""
@contextmanager
def annotate_fragment_by_name(self, name):
f = [x for x in self.fragments.all() if x.name == name]
if len(f) != 1:
raise Exception("Zero or more than one fragments have name %s" % (name,))
u = f[0].indexed_fragment()
yield u
@contextmanager
def annotate_fragment_by_fragment_id(self, fragment_id):
f = [x for x in self.fragments.all() if x.id == fragment_id]
if len(f) != 1:
raise Exception(
"Zero or more than one fragments have ID %s" % (fragment_id,)
)
u = f[0].indexed_fragment()
yield u
@contextmanager
def update_fragment_by_name(self, name, new_name=None):
if self.parent is None:
raise Exception(
"Cannot update fragment without a parent genome. Try editing instead."
)
f = [x for x in self.fragments.filter(name=name)]
if len(f) != 1:
raise Exception("Zero or more than one fragments have name %s" % (name,))
new_name = name if new_name is None else new_name
u = f[0].indexed_fragment().update(new_name)
yield u
self._add_updated_fragment(u)
@contextmanager
def update_fragment_by_fragment_id(
self, fragment_id, new_name=None, new_fragment=True
):
if self.parent is None:
raise Exception(
"Cannot update fragment without a parent genome. Try editing instead."
)
f = [x for x in self.fragments.filter(id=fragment_id)]
if len(f) != 1:
raise Exception(
"Zero or more than one fragments have ID %s" % (fragment_id,)
)
new_name = f[0].name if new_name is None else new_name
u = f[0].indexed_fragment()
if new_fragment is True:
u = u.update(new_name)
yield u
if new_fragment is True:
self._add_updated_fragment(u)
def add_fragment(self, name, sequence, circular=False):
if len(sequence) == 0:
raise Exception("Cannot create a fragment of length zero")
new_fragment = Fragment.create_with_sequence(
name=name, sequence=sequence, circular=circular
)
self.genome_fragment_set.create(fragment=new_fragment, inherited=False)
return new_fragment
def _add_updated_fragment(self, fragment):
existing_fragment_ids = [f.id for f in self.fragments.all()]
if fragment.parent_id in existing_fragment_ids:
gf = self.genome_fragment_set.get(fragment=fragment.parent)
gf.fragment = fragment
gf.inherited = False
gf.save()
else:
raise Exception("Fragment parent not part of the genome")
| mit | 4,635,774,173,059,254,000 | 36 | 86 | 0.588514 | false |
shuggiefisher/django-on-google-app-engine-base | django/db/models/fields/subclassing.py | 229 | 4356 | """
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the __metaclass__ for your Field subclass, implement
to_python() and the other necessary methods and everything will work seamlessly.
"""
from inspect import getargspec
from warnings import warn
def call_with_connection(func):
arg_names, varargs, varkwargs, defaults = getargspec(func)
updated = ('connection' in arg_names or varkwargs)
if not updated:
warn("A Field class whose %s method hasn't been updated to take a "
"`connection` argument." % func.__name__,
DeprecationWarning, stacklevel=3)
def inner(*args, **kwargs):
if 'connection' not in kwargs:
from django.db import connection
kwargs['connection'] = connection
warn("%s has been called without providing a connection argument. " %
func.__name__, DeprecationWarning,
stacklevel=2)
if updated:
return func(*args, **kwargs)
if 'connection' in kwargs:
del kwargs['connection']
return func(*args, **kwargs)
return inner
def call_with_connection_and_prepared(func):
arg_names, varargs, varkwargs, defaults = getargspec(func)
updated = (
('connection' in arg_names or varkwargs) and
('prepared' in arg_names or varkwargs)
)
if not updated:
warn("A Field class whose %s method hasn't been updated to take "
"`connection` and `prepared` arguments." % func.__name__,
DeprecationWarning, stacklevel=3)
def inner(*args, **kwargs):
if 'connection' not in kwargs:
from django.db import connection
kwargs['connection'] = connection
warn("%s has been called without providing a connection argument. " %
func.__name__, DeprecationWarning,
stacklevel=2)
if updated:
return func(*args, **kwargs)
if 'connection' in kwargs:
del kwargs['connection']
if 'prepared' in kwargs:
del kwargs['prepared']
return func(*args, **kwargs)
return inner
class LegacyConnection(type):
"""
A metaclass to normalize arguments given to the get_db_prep_* and db_type
methods on fields.
"""
def __new__(cls, name, bases, attrs):
new_cls = super(LegacyConnection, cls).__new__(cls, name, bases, attrs)
for attr in ('db_type', 'get_db_prep_save'):
setattr(new_cls, attr, call_with_connection(getattr(new_cls, attr)))
for attr in ('get_db_prep_lookup', 'get_db_prep_value'):
setattr(new_cls, attr, call_with_connection_and_prepared(getattr(new_cls, attr)))
return new_cls
class SubfieldBase(LegacyConnection):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name):
if func:
func(self, cls, name)
else:
super(superclass, self).contribute_to_class(cls, name)
setattr(cls, self.name, Creator(self))
return contribute_to_class
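# Illustrative sketch (mirrors the classic custom-field pattern; the Hand
# type is hypothetical): with SubfieldBase as the metaclass, attribute
# assignment is routed through to_python() by the Creator descriptor.
#
#   class HandField(models.Field):
#       __metaclass__ = SubfieldBase
#
#       def to_python(self, value):
#           return value if isinstance(value, Hand) else Hand(value)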
| bsd-3-clause | -2,189,686,644,350,454,000 | 36.230769 | 93 | 0.626492 | false |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
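# Out-of-core sketch (illustrative): partial_fit consumes chunks, so the
# full matrix never needs to be held in memory at once.
#
#   ipca = IncrementalPCA(n_components=2)
#   for chunk in np.array_split(X, 5):
#       ipca.partial_fit(chunk)
#   Y = ipca.transform(X)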
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| unlicense | -6,878,377,739,908,298,000 | 35.897321 | 78 | 0.643557 | false |
tthtlc/volatility | volatility/plugins/overlays/linux/linux64.py | 44 | 1595 | # Volatility
# Copyright (c) 2011 Michael Cohen <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
Support for 64 bit Linux systems.
@author: Michael Cohen
@license: GNU General Public License 2.0
@contact: [email protected]
"""
from volatility import obj
class VolatilityDTB(obj.VolatilityMagic):
"""A scanner for DTB values."""
def generate_suggestions(self):
"""Tries to locate the DTB."""
profile = self.obj_vm.profile
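# init_level4_pgt is the kernel's top-level page table; subtracting the
# x86-64 kernel virtual base (0xffffffff80000000) converts its symbol
# address to the physical address used as the DTB.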
yield profile.get_symbol("init_level4_pgt") - 0xffffffff80000000
class Linux64ObjectClasses(obj.ProfileModification):
""" Makes slight changes to the DTB checker """
conditions = {'os': lambda x: x == 'linux',
'memory_model': lambda x: x == '64bit'}
before = ['LinuxObjectClasses']
def modification(self, profile):
profile.object_classes.update({
'VolatilityDTB': VolatilityDTB
})
| gpl-2.0 | 5,811,200,238,909,870,000 | 32.93617 | 72 | 0.684013 | false |
ujjvala-addsol/addsol_hr | openerp/addons/account/account_financial_report.py | 339 | 7636 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
# ---------------------------------------------------------
# Account Financial Report
# ---------------------------------------------------------
class account_financial_report(osv.osv):
_name = "account.financial.report"
_description = "Account Report"
def _get_level(self, cr, uid, ids, field_name, arg, context=None):
'''Returns a dictionary with key=the ID of a record and value = the level of this
record in the tree structure.'''
res = {}
for report in self.browse(cr, uid, ids, context=context):
level = 0
if report.parent_id:
level = report.parent_id.level + 1
res[report.id] = level
return res
def _get_children_by_order(self, cr, uid, ids, context=None):
'''Returns a dictionary with key = the ID of a record and value = all its children,
computed recursively and sorted by sequence. Ready for printing.'''
res = []
for id in ids:
res.append(id)
ids2 = self.search(cr, uid, [('parent_id', '=', id)], order='sequence ASC', context=context)
res += self._get_children_by_order(cr, uid, ids2, context=context)
return res
def _get_balance(self, cr, uid, ids, field_names, args, context=None):
'''Returns a dictionary with key = the ID of a record and value = the balance amount
computed for this record. If the record is of type:
'accounts' : it's the sum of the linked accounts
'account_type' : it's the sum of leaf accounts with such an account_type
'account_report' : it's the amount of the related report
'sum' : it's the sum of the children of this record (aka a 'view' record)'''
account_obj = self.pool.get('account.account')
res = {}
for report in self.browse(cr, uid, ids, context=context):
if report.id in res:
continue
res[report.id] = dict((fn, 0.0) for fn in field_names)
if report.type == 'accounts':
# it's the sum of the linked accounts
for a in report.account_ids:
for field in field_names:
res[report.id][field] += getattr(a, field)
elif report.type == 'account_type':
# it's the sum of the leaf accounts with such an account type
report_types = [x.id for x in report.account_type_ids]
account_ids = account_obj.search(cr, uid, [('user_type','in', report_types), ('type','!=','view')], context=context)
for a in account_obj.browse(cr, uid, account_ids, context=context):
for field in field_names:
res[report.id][field] += getattr(a, field)
elif report.type == 'account_report' and report.account_report_id:
# it's the amount of the linked report
res2 = self._get_balance(cr, uid, [report.account_report_id.id], field_names, False, context=context)
for key, value in res2.items():
for field in field_names:
res[report.id][field] += value[field]
elif report.type == 'sum':
# it's the sum of the children of this account.report
res2 = self._get_balance(cr, uid, [rec.id for rec in report.children_ids], field_names, False, context=context)
for key, value in res2.items():
for field in field_names:
res[report.id][field] += value[field]
return res
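# Illustrative sketch of the recursion above (hypothetical chart of
# accounts): a 'sum' report "Assets" whose children are two 'accounts'
# reports computes
#   balance(Assets) = balance(Bank) + balance(Receivable)
# via the recursive _get_balance() call on children_ids.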
_columns = {
'name': fields.char('Report Name', required=True, translate=True),
'parent_id': fields.many2one('account.financial.report', 'Parent'),
'children_ids': fields.one2many('account.financial.report', 'parent_id', 'Account Report'),
'sequence': fields.integer('Sequence'),
'balance': fields.function(_get_balance, 'Balance', multi='balance'),
'debit': fields.function(_get_balance, 'Debit', multi='balance'),
'credit': fields.function(_get_balance, 'Credit', multi="balance"),
'level': fields.function(_get_level, string='Level', store=True, type='integer'),
'type': fields.selection([
('sum','View'),
('accounts','Accounts'),
('account_type','Account Type'),
('account_report','Report Value'),
],'Type'),
'account_ids': fields.many2many('account.account', 'account_account_financial_report', 'report_line_id', 'account_id', 'Accounts'),
'account_report_id': fields.many2one('account.financial.report', 'Report Value'),
'account_type_ids': fields.many2many('account.account.type', 'account_account_financial_report_type', 'report_id', 'account_type_id', 'Account Types'),
'sign': fields.selection([(-1, 'Reverse balance sign'), (1, 'Preserve balance sign')], 'Sign on Reports', required=True, help='For accounts that are typically more debited than credited and that you would like to print as negative amounts in your reports, you should reverse the sign of the balance; e.g.: Expense account. The same applies for accounts that are typically more credited than debited and that you would like to print as positive amounts in your reports; e.g.: Income account.'),
'display_detail': fields.selection([
('no_detail','No detail'),
('detail_flat','Display children flat'),
('detail_with_hierarchy','Display children with hierarchy')
], 'Display details'),
'style_overwrite': fields.selection([
(0, 'Automatic formatting'),
(1,'Main Title 1 (bold, underlined)'),
(2,'Title 2 (bold)'),
(3,'Title 3 (bold, smaller)'),
(4,'Normal Text'),
(5,'Italic Text (smaller)'),
(6,'Smallest Text'),
],'Financial Report Style', help="You can set up here the format you want this record to be displayed. If you leave the automatic formatting, it will be computed based on the financial reports hierarchy (auto-computed field 'level')."),
}
_defaults = {
'type': 'sum',
'display_detail': 'detail_flat',
'sign': 1,
'style_overwrite': 0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,442,987,804,468,047,000 | 52.739437 | 501 | 0.588914 | false |
lindsayad/sympy | sympy/interactive/tests/test_ipythonprinting.py | 11 | 6263 | """Tests that the IPython printing module is properly loaded. """
from sympy.core.compatibility import u
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] == "pi"
assert app.user_ns['a2']['text/plain'] == "pi**2"
else:
assert app.user_ns['a'][0]['text/plain'] == "pi"
assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
# Load printing extension
app.run_cell("from sympy import init_printing")
app.run_cell("init_printing()")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
else:
assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
def test_print_builtin_option():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
app.run_cell("from sympy import init_printing")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note: Python 2's unicode type is equivalent to str in Python 3. In Python 3 there is
# one text type, str, which holds Unicode data, and two byte types, bytes and bytearray.
# XXX: How can we make this ignore the terminal width? This test fails if
# the terminal is too narrow.
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
# If we enable the default printing, then the dictionary's should render
# as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
latex = app.user_ns['a']['text/latex']
else:
text = app.user_ns['a'][0]['text/plain']
latex = app.user_ns['a'][0]['text/latex']
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True, print_builtin=False)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note: Python 2's unicode type is equivalent to str in Python 3. In Python 3 there is
# one text type, str, which holds Unicode data, and two byte types, bytes and bytearray.
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
# Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
# Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("import IPython")
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import init_printing, Matrix")
app.run_cell("init_printing(use_latex='matplotlib')")
# The png formatter is not enabled by default in this context
app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
# Make sure no warnings are raised by IPython
app.run_cell("import warnings")
app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
# This should not raise an exception
app.run_cell("a = format(Matrix([1, 2, 3]))")
# issue 9799
app.run_cell("from sympy import Piecewise, Symbol, Eq")
app.run_cell("x = Symbol('x'); pw = format(Piecewise((1, Eq(x, 0)), (0, True)))")
| bsd-3-clause | 2,265,527,711,930,941,200 | 46.090226 | 102 | 0.612486 | false |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/agw/ribbon/art_aui.py | 6 | 55653 | """
L{RibbonAUIArtProvider} is responsible for drawing all the components of the ribbon
interface using an AUI-compatible appearance.
Description
===========
This allows a ribbon bar to have a pluggable look-and-feel, while retaining the same
underlying behaviour. As a single art provider is used for all ribbon components, a
ribbon bar usually has a consistent (though unique) appearance.
By default, a L{RibbonBar} uses an instance of a class called `RibbonDefaultArtProvider`,
which resolves to `RibbonAUIArtProvider`, `RibbonMSWArtProvider`, or `RibbonOSXArtProvider`
- whichever is most appropriate to the current platform. These art providers are all
slightly configurable with regard to colours and fonts, but for larger modifications,
you can derive from one of these classes, or write a completely new art provider class.
Call L{RibbonBar.SetArtProvider} to change the art provider being used.
See Also
========
L{RibbonBar}
"""
import wx
from math import cos
from math import pi as M_PI
from art_msw import RibbonMSWArtProvider
from art_internal import RibbonHSLColour, RibbonShiftLuminance, RibbonInterpolateColour
import bar as BAR, panel as PANEL
from art import *
if wx.Platform == "__WXMAC__":
import Carbon.Appearance
def FontFromFont(original):
newFont = wx.Font(original.GetPointSize(), original.GetFamily(),
original.GetStyle(), original.GetWeight(), original.GetUnderlined(),
original.GetFaceName(), original.GetEncoding())
return newFont
class RibbonAUIArtProvider(RibbonMSWArtProvider):
def __init__(self):
RibbonMSWArtProvider.__init__(self)
if wx.Platform == "__WXMAC__":
if hasattr(wx, 'MacThemeColour'):
base_colour = wx.MacThemeColour(Carbon.Appearance.kThemeBrushToolbarBackground)
else:
brush = wx.Brush(wx.BLACK)
brush.MacSetTheme(Carbon.Appearance.kThemeBrushToolbarBackground)
base_colour = brush.GetColour()
else:
base_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)
self.SetColourScheme(base_colour, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT),
wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT))
self._tab_active_label_font = FontFromFont(self._tab_label_font)
self._tab_active_label_font.SetWeight(wx.FONTWEIGHT_BOLD)
self._page_border_left = 1
self._page_border_right = 1
self._page_border_top = 1
self._page_border_bottom = 2
self._tab_separation_size = 0
self._gallery_bitmap_padding_left_size = 3
self._gallery_bitmap_padding_right_size = 3
self._gallery_bitmap_padding_top_size = 3
self._gallery_bitmap_padding_bottom_size = 3
def Clone(self):
"""
Create a new art provider which is a clone of this one.
"""
copy = RibbonAUIArtProvider()
self.CloneTo(copy)
copy._tab_ctrl_background_colour = self._tab_ctrl_background_colour
copy._tab_ctrl_background_gradient_colour = self._tab_ctrl_background_gradient_colour
copy._panel_label_background_colour = self._panel_label_background_colour
copy._panel_label_background_gradient_colour = self._panel_label_background_gradient_colour
copy._panel_hover_label_background_colour = self._panel_hover_label_background_colour
copy._panel_hover_label_background_gradient_colour = self._panel_hover_label_background_gradient_colour
copy._background_brush = self._background_brush
copy._tab_active_top_background_brush = self._tab_active_top_background_brush
copy._tab_hover_background_brush = self._tab_hover_background_brush
copy._button_bar_hover_background_brush = self._button_bar_hover_background_brush
copy._button_bar_active_background_brush = self._button_bar_active_background_brush
copy._gallery_button_active_background_brush = self._gallery_button_active_background_brush
copy._gallery_button_hover_background_brush = self._gallery_button_hover_background_brush
copy._gallery_button_disabled_background_brush = self._gallery_button_disabled_background_brush
copy._toolbar_hover_borden_pen = self._toolbar_hover_borden_pen
copy._tool_hover_background_brush = self._tool_hover_background_brush
copy._tool_active_background_brush = self._tool_active_background_brush
return copy
def SetFont(self, id, font):
"""
Set the value of a certain font setting to the given value.
`id` can be one of the font values of `RibbonArtSetting`.
:param `id`: the font id;
:param `font`: the font to use for that setting.
"""
RibbonMSWArtProvider.SetFont(self, id, font)
if id == RIBBON_ART_TAB_LABEL_FONT:
self._tab_active_label_font = FontFromFont(self._tab_label_font)
self._tab_active_label_font.SetWeight(wx.FONTWEIGHT_BOLD)
def GetColour(self, id):
"""
Get the value of a certain colour setting.
`id` can be one of the colour values of `RibbonArtSetting`.
:param `id`: the colour id.
"""
if id in [RIBBON_ART_PAGE_BACKGROUND_COLOUR, RIBBON_ART_PAGE_BACKGROUND_GRADIENT_COLOUR]:
return self._background_brush.GetColour()
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_COLOUR:
return self._tab_ctrl_background_colour
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_GRADIENT_COLOUR:
return self._tab_ctrl_background_gradient_colour
elif id in [RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_COLOUR, RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR]:
return self._tab_active_top_background_brush.GetColour()
elif id in [RIBBON_ART_TAB_HOVER_BACKGROUND_COLOUR, RIBBON_ART_TAB_HOVER_BACKGROUND_GRADIENT_COLOUR]:
return self._tab_hover_background_brush.GetColour()
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_COLOUR:
return self._panel_label_background_colour
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_GRADIENT_COLOUR:
return self._panel_label_background_gradient_colour
elif id == RIBBON_ART_PANEL_HOVER_LABEL_BACKGROUND_COLOUR:
return self._panel_hover_label_background_colour
elif id == RIBBON_ART_PANEL_HOVER_LABEL_BACKGROUND_GRADIENT_COLOUR:
return self._panel_hover_label_background_gradient_colour
elif id in [RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_COLOUR, RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_GRADIENT_COLOUR]:
return self._button_bar_hover_background_brush.GetColour()
elif id in [RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_GRADIENT_COLOUR]:
return self._gallery_button_hover_background_brush.GetColour()
elif id in [RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_GRADIENT_COLOUR]:
return self._gallery_button_active_background_brush.GetColour()
elif id in [RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_GRADIENT_COLOUR]:
return self._gallery_button_disabled_background_brush.GetColour()
else:
return RibbonMSWArtProvider.GetColour(self, id)
def SetColour(self, id, colour):
"""
Set the value of a certain colour setting to the given value.
`id` can be one of the colour values of `RibbonArtSetting`, though not all colour
settings will have an effect on every art provider.
:param `id`: the colour id;
:param `colour`: the colour to use for that setting.
:see: L{SetColourScheme}
"""
if id in [RIBBON_ART_PAGE_BACKGROUND_COLOUR, RIBBON_ART_PAGE_BACKGROUND_GRADIENT_COLOUR]:
self._background_brush.SetColour(colour)
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_COLOUR:
self._tab_ctrl_background_colour = colour
elif id == RIBBON_ART_TAB_CTRL_BACKGROUND_GRADIENT_COLOUR:
self._tab_ctrl_background_gradient_colour = colour
elif id in [RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_COLOUR, RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR]:
self._tab_active_top_background_brush.SetColour(colour)
elif id in [RIBBON_ART_TAB_HOVER_BACKGROUND_COLOUR, RIBBON_ART_TAB_HOVER_BACKGROUND_GRADIENT_COLOUR]:
self._tab_hover_background_brush.SetColour(colour)
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_COLOUR:
self._panel_label_background_colour = colour
elif id == RIBBON_ART_PANEL_LABEL_BACKGROUND_GRADIENT_COLOUR:
self._panel_label_background_gradient_colour = colour
elif id in [RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_COLOUR, RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_GRADIENT_COLOUR]:
self._button_bar_hover_background_brush.SetColour(colour)
elif id in [RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_GRADIENT_COLOUR]:
self._gallery_button_hover_background_brush.SetColour(colour)
elif id in [RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_GRADIENT_COLOUR]:
self._gallery_button_active_background_brush.SetColour(colour)
elif id in [RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_COLOUR, RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_GRADIENT_COLOUR]:
self._gallery_button_disabled_background_brush.SetColour(colour)
else:
RibbonMSWArtProvider.SetColour(self, id, colour)
def SetColourScheme(self, primary, secondary, tertiary):
"""
Set all applicable colour settings from a few base colours.
Uses any or all of the three given colours to create a colour scheme, and then
sets all colour settings which are relevant to the art provider using that
scheme. Note that some art providers may not use the tertiary colour for
anything, and some may not use the secondary colour either.
:param `primary`: the primary base colour from which most background shades are derived;
:param `secondary`: the secondary base colour, used mainly for hovered and active elements;
:param `tertiary`: the tertiary base colour, used sparingly (e.g. for hovered panel labels).
:see: L{SetColour}, L{RibbonMSWArtProvider.GetColourScheme}
"""
primary_hsl = RibbonHSLColour(primary)
secondary_hsl = RibbonHSLColour(secondary)
tertiary_hsl = RibbonHSLColour(tertiary)
# Map primary & secondary luminance from [0, 1] to [0.15, 0.85]
primary_hsl.luminance = cos(primary_hsl.luminance * M_PI) * -0.35 + 0.5
secondary_hsl.luminance = cos(secondary_hsl.luminance * M_PI) * -0.35 + 0.5
# TODO: Remove next line once this provider stops piggybacking MSW
RibbonMSWArtProvider.SetColourScheme(self, primary, secondary, tertiary)
self._tab_ctrl_background_colour = RibbonShiftLuminance(primary_hsl, 0.9).ToRGB()
self._tab_ctrl_background_gradient_colour = RibbonShiftLuminance(primary_hsl, 1.7).ToRGB()
self._tab_border_pen = wx.Pen(RibbonShiftLuminance(primary_hsl, 0.75).ToRGB())
self._tab_label_colour = RibbonShiftLuminance(primary_hsl, 0.1).ToRGB()
self._tab_hover_background_top_colour = primary_hsl.ToRGB()
self._tab_hover_background_top_gradient_colour = RibbonShiftLuminance(primary_hsl, 1.6).ToRGB()
self._tab_hover_background_brush = wx.Brush(self._tab_hover_background_top_colour)
self._tab_active_background_colour = self._tab_ctrl_background_gradient_colour
self._tab_active_background_gradient_colour = primary_hsl.ToRGB()
self._tab_active_top_background_brush = wx.Brush(self._tab_active_background_colour)
self._panel_label_colour = self._tab_label_colour
self._panel_minimised_label_colour = self._panel_label_colour
self._panel_hover_label_colour = tertiary_hsl.ToRGB()
self._page_border_pen = self._tab_border_pen
self._panel_border_pen = self._tab_border_pen
self._background_brush = wx.Brush(primary_hsl.ToRGB())
self._page_hover_background_colour = RibbonShiftLuminance(primary_hsl, 1.5).ToRGB()
self._page_hover_background_gradient_colour = RibbonShiftLuminance(primary_hsl, 0.9).ToRGB()
self._panel_label_background_colour = RibbonShiftLuminance(primary_hsl, 0.85).ToRGB()
self._panel_label_background_gradient_colour = RibbonShiftLuminance(primary_hsl, 0.97).ToRGB()
self._panel_hover_label_background_gradient_colour = secondary_hsl.ToRGB()
self._panel_hover_label_background_colour = secondary_hsl.Lighter(0.2).ToRGB()
self._button_bar_hover_border_pen = wx.Pen(secondary_hsl.ToRGB())
self._button_bar_hover_background_brush = wx.Brush(RibbonShiftLuminance(secondary_hsl, 1.7).ToRGB())
self._button_bar_active_background_brush = wx.Brush(RibbonShiftLuminance(secondary_hsl, 1.4).ToRGB())
self._button_bar_label_colour = self._tab_label_colour
self._gallery_border_pen = self._tab_border_pen
self._gallery_item_border_pen = self._button_bar_hover_border_pen
self._gallery_hover_background_brush = wx.Brush(RibbonShiftLuminance(primary_hsl, 1.2).ToRGB())
self._gallery_button_background_colour = self._page_hover_background_colour
self._gallery_button_background_gradient_colour = self._page_hover_background_gradient_colour
self._gallery_button_hover_background_brush = self._button_bar_hover_background_brush
self._gallery_button_active_background_brush = self._button_bar_active_background_brush
self._gallery_button_disabled_background_brush = wx.Brush(primary_hsl.Desaturated(0.15).ToRGB())
self.SetColour(RIBBON_ART_GALLERY_BUTTON_FACE_COLOUR, RibbonShiftLuminance(primary_hsl, 0.1).ToRGB())
self.SetColour(RIBBON_ART_GALLERY_BUTTON_DISABLED_FACE_COLOUR, wx.Colour(128, 128, 128))
self.SetColour(RIBBON_ART_GALLERY_BUTTON_ACTIVE_FACE_COLOUR, RibbonShiftLuminance(secondary_hsl, 0.1).ToRGB())
self.SetColour(RIBBON_ART_GALLERY_BUTTON_HOVER_FACE_COLOUR, RibbonShiftLuminance(secondary_hsl, 0.1).ToRGB())
self._toolbar_border_pen = self._tab_border_pen
self.SetColour(RIBBON_ART_TOOLBAR_FACE_COLOUR, RibbonShiftLuminance(primary_hsl, 0.1).ToRGB())
self._tool_background_colour = self._page_hover_background_colour
self._tool_background_gradient_colour = self._page_hover_background_gradient_colour
self._toolbar_hover_borden_pen = self._button_bar_hover_border_pen
self._tool_hover_background_brush = self._button_bar_hover_background_brush
self._tool_active_background_brush = self._button_bar_active_background_brush
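# Minimal usage sketch (assumes an existing RibbonBar instance `ribbon`):
#
#   provider = RibbonAUIArtProvider()
#   provider.SetColourScheme(wx.Colour(194, 216, 241),  # primary
#                            wx.Colour(255, 223, 114),  # secondary
#                            wx.Colour(0, 0, 0))        # tertiary
#   ribbon.SetArtProvider(provider)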
def DrawTabCtrlBackground(self, dc, wnd, rect):
"""
Draw the background of the tab region of a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw.
"""
gradient_rect = wx.Rect(*rect)
gradient_rect.height -= 1
dc.GradientFillLinear(gradient_rect, self._tab_ctrl_background_colour, self._tab_ctrl_background_gradient_colour, wx.SOUTH)
dc.SetPen(self._tab_border_pen)
dc.DrawLine(rect.x, rect.GetBottom(), rect.GetRight()+1, rect.GetBottom())
def GetTabCtrlHeight(self, dc, wnd, pages):
"""
Calculate the height (in pixels) of the tab region of a ribbon bar.
Note that as the tab region can contain scroll buttons, the height should be
greater than or equal to the minimum height for a tab scroll button.
:param `dc`: A device context to use when one is required for size calculations;
:param `wnd`: The window onto which the tabs will eventually be drawn;
:param `pages`: The tabs which will acquire the returned height.
"""
text_height = 0
icon_height = 0
if len(pages) <= 1 and (self._flags & RIBBON_BAR_ALWAYS_SHOW_TABS) == 0:
# To preserve space, a single tab need not be displayed. We still need
# one pixel of border though.
return 1
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
dc.SetFont(self._tab_active_label_font)
text_height = dc.GetTextExtent("ABCDEFXj")[1]
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
for info in pages:
if info.page.GetIcon().IsOk():
icon_height = max(icon_height, info.page.GetIcon().GetHeight())
return max(text_height, icon_height) + 10
def DrawTab(self, dc, wnd, tab):
"""
Draw a single tab in the tab region of a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto (not the L{RibbonPage}
associated with the tab being drawn);
:param `tab`: The rectangle within which to draw, and also the tab label,
icon, and state (active and/or hovered). The drawing rectangle will be
entirely within a rectangle on the same device context previously painted
with L{DrawTabCtrlBackground}. The rectangle's width will be at least the
minimum value returned by L{GetBarTabWidth}, and height will be the value
returned by L{GetTabCtrlHeight}.
"""
if tab.rect.height <= 1:
return
dc.SetFont(self._tab_label_font)
dc.SetPen(wx.TRANSPARENT_PEN)
if tab.active or tab.hovered:
if tab.active:
dc.SetFont(self._tab_active_label_font)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(tab.rect.x, tab.rect.y + tab.rect.height - 1, tab.rect.width - 1, 1)
grad_rect = wx.Rect(*tab.rect)
grad_rect.height -= 4
grad_rect.width -= 1
grad_rect.height /= 2
grad_rect.y = grad_rect.y + tab.rect.height - grad_rect.height - 1
dc.SetBrush(self._tab_active_top_background_brush)
dc.DrawRectangle(tab.rect.x, tab.rect.y + 3, tab.rect.width - 1, grad_rect.y - tab.rect.y - 3)
dc.GradientFillLinear(grad_rect, self._tab_active_background_colour, self._tab_active_background_gradient_colour, wx.SOUTH)
else:
btm_rect = wx.Rect(*tab.rect)
btm_rect.height -= 4
btm_rect.width -= 1
btm_rect.height /= 2
btm_rect.y = btm_rect.y + tab.rect.height - btm_rect.height - 1
dc.SetBrush(self._tab_hover_background_brush)
dc.DrawRectangle(btm_rect.x, btm_rect.y, btm_rect.width, btm_rect.height)
grad_rect = wx.Rect(*tab.rect)
grad_rect.width -= 1
grad_rect.y += 3
grad_rect.height = btm_rect.y - grad_rect.y
dc.GradientFillLinear(grad_rect, self._tab_hover_background_top_colour, self._tab_hover_background_top_gradient_colour, wx.SOUTH)
border_points = [wx.Point() for i in xrange(5)]
border_points[0] = wx.Point(0, 3)
border_points[1] = wx.Point(1, 2)
border_points[2] = wx.Point(tab.rect.width - 3, 2)
border_points[3] = wx.Point(tab.rect.width - 1, 4)
border_points[4] = wx.Point(tab.rect.width - 1, tab.rect.height - 1)
dc.SetPen(self._tab_border_pen)
dc.DrawLines(border_points, tab.rect.x, tab.rect.y)
old_clip = dc.GetClippingRect()
is_first_tab = False
bar = tab.page.GetParent()
icon = wx.NullBitmap
if isinstance(bar, BAR.RibbonBar) and bar.GetPage(0) == tab.page:
is_first_tab = True
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS:
icon = tab.page.GetIcon()
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS == 0:
x = tab.rect.x + (tab.rect.width - icon.GetWidth()) / 2
dc.DrawBitmap(icon, x, tab.rect.y + 1 + (tab.rect.height - 1 - icon.GetHeight()) / 2, True)
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS:
label = tab.page.GetLabel()
if label.strip():
dc.SetTextForeground(self._tab_label_colour)
dc.SetBackgroundMode(wx.TRANSPARENT)
offset = 0
if icon.IsOk():
offset += icon.GetWidth() + 2
text_width, text_height = dc.GetTextExtent(label)
x = (tab.rect.width - 2 - text_width - offset) / 2
if x > 8:
x = 8
elif x < 1:
x = 1
width = tab.rect.width - x - 2
x += tab.rect.x + offset
y = tab.rect.y + (tab.rect.height - text_height) / 2
if icon.IsOk():
dc.DrawBitmap(icon, x - offset, tab.rect.y + (tab.rect.height - icon.GetHeight()) / 2, True)
dc.SetClippingRegion(x, tab.rect.y, width, tab.rect.height)
dc.DrawText(label, x, y)
# Draw the left hand edge of the tab only for the first tab (subsequent
# tabs use the right edge of the prior tab as their left edge). As this is
# outside the rectangle for the tab, only draw it if the leftmost part of
# the tab is within the clip rectangle (the clip region has to be cleared
# to draw outside the tab).
if is_first_tab and old_clip.x <= tab.rect.x and tab.rect.x < old_clip.x + old_clip.width:
dc.DestroyClippingRegion()
dc.DrawLine(tab.rect.x - 1, tab.rect.y + 4, tab.rect.x - 1, tab.rect.y + tab.rect.height - 1)
def GetBarTabWidth(self, dc, wnd, label, bitmap, ideal=None, small_begin_need_separator=None,
small_must_have_separator=None, minimum=None):
"""
Calculate the ideal and minimum width (in pixels) of a tab in a ribbon bar.
:param `dc`: A device context to use when one is required for size calculations;
:param `wnd`: The window onto which the tab will eventually be drawn;
:param `label`: The tab's label (or wx.EmptyString if it has none);
:param `bitmap`: The tab's icon (or wx.NullBitmap if it has none);
:param `ideal`: The ideal width (in pixels) of the tab;
:param `small_begin_need_separator`: A size less than the `ideal` size, at which
a tab separator should begin to be drawn (i.e. drawn, but still fairly transparent);
:param `small_must_have_separator`: A size less than the `small_begin_need_separator`
size, at which a tab separator must be drawn (i.e. drawn at full opacity);
:param `minimum`: A size less than the `small_must_have_separator` size, and greater
than or equal to zero, which is the minimum pixel width for the tab.
"""
width = mini = 0
if self._flags & RIBBON_BAR_SHOW_PAGE_LABELS and label.strip():
dc.SetFont(self._tab_active_label_font)
width += dc.GetTextExtent(label)[0]
mini += min(30, width) # enough for a few chars
if bitmap.IsOk():
# gap between label and bitmap
width += 4
mini += 2
if self._flags & RIBBON_BAR_SHOW_PAGE_ICONS and bitmap.IsOk():
width += bitmap.GetWidth()
mini += bitmap.GetWidth()
ideal = width + 16
small_begin_need_separator = mini
small_must_have_separator = mini
minimum = mini
return ideal, small_begin_need_separator, small_must_have_separator, minimum
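# Note: this port returns the computed widths as a tuple (ideal,
# small_begin_need_separator, small_must_have_separator, minimum) instead of
# writing them back through the arguments passed in.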
def DrawTabSeparator(self, dc, wnd, rect, visibility):
"""
Draw a separator between two tabs in a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw, which will be entirely
within a rectangle on the same device context previously painted with
L{DrawTabCtrlBackground};
:param `visibility`: The opacity with which to draw the separator. Values
are in the range [0, 1], with 0 being totally transparent, and 1 being totally
opaque.
"""
# No explicit separators between tabs
pass
def DrawPageBackground(self, dc, wnd, rect):
"""
Draw the background of a ribbon page.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto (which is commonly the
L{RibbonPage} whose background is being drawn, but doesn't have to be);
:param `rect`: The rectangle within which to draw.
:see: L{RibbonMSWArtProvider.GetPageBackgroundRedrawArea}
"""
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x + 1, rect.y, rect.width - 2, rect.height - 1)
dc.SetPen(self._page_border_pen)
dc.DrawLine(rect.x, rect.y, rect.x, rect.y + rect.height)
        dc.DrawLine(rect.GetRight(), rect.y, rect.GetRight(), rect.y + rect.height)
        dc.DrawLine(rect.x, rect.GetBottom(), rect.GetRight() + 1, rect.GetBottom())
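    # Illustrative sketch (not part of the original module): rendering the page
    # background into an off-screen bitmap, e.g. for caching.  The helper is
    # hypothetical; `wnd` is assumed to be the RibbonPage being drawn and the
    # size is chosen by the caller.
    def _example_cache_page_background(self, wnd, width, height):
        bmp = wx.EmptyBitmap(width, height)
        dc = wx.MemoryDC()
        dc.SelectObject(bmp)
        self.DrawPageBackground(dc, wnd, wx.Rect(0, 0, width, height))
        dc.SelectObject(wx.NullBitmap)
        return bmp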
def GetScrollButtonMinimumSize(self, dc, wnd, style):
"""
Calculate the minimum size (in pixels) of a scroll button.
:param `dc`: A device context to use when one is required for size calculations;
:param `wnd`: The window onto which the scroll button will eventually be drawn;
        :param `style`: A combination of flags from `RibbonScrollButtonStyle`, including
         a direction flag and a ``RIBBON_SCROLL_BTN_FOR_*`` flag (state flags may be
         given too, but should be ignored, as a button should retain a constant size
         regardless of its state).
"""
return wx.Size(11, 11)
def DrawScrollButton(self, dc, wnd, rect, style):
"""
Draw a ribbon-style scroll button.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw. The size of this rectangle
will be at least the size returned by L{GetScrollButtonMinimumSize} for a
scroll button with the same style. For tab scroll buttons, this rectangle
will be entirely within a rectangle on the same device context previously
painted with L{DrawTabCtrlBackground}, but this is not guaranteed for other
types of button (for example, page scroll buttons will not be painted on
an area previously painted with L{DrawPageBackground});
        :param `style`: A combination of flags from `RibbonScrollButtonStyle`,
         including a direction flag, a ``RIBBON_SCROLL_BTN_FOR_*`` flag, and one or
         more state flags.
"""
true_rect = wx.Rect(*rect)
arrow_points = [wx.Point() for i in xrange(3)]
if style & RIBBON_SCROLL_BTN_FOR_MASK == RIBBON_SCROLL_BTN_FOR_TABS:
true_rect.y += 2
true_rect.height -= 2
dc.SetPen(self._tab_border_pen)
else:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
dc.SetPen(self._page_border_pen)
result = style & RIBBON_SCROLL_BTN_DIRECTION_MASK
if result == RIBBON_SCROLL_BTN_LEFT:
dc.DrawLine(true_rect.GetRight(), true_rect.y, true_rect.GetRight(), true_rect.y + true_rect.height)
arrow_points[0] = wx.Point(rect.width / 2 - 2, rect.height / 2)
arrow_points[1] = arrow_points[0] + wx.Point(5, -5)
arrow_points[2] = arrow_points[0] + wx.Point(5, 5)
elif result == RIBBON_SCROLL_BTN_RIGHT:
dc.DrawLine(true_rect.x, true_rect.y, true_rect.x, true_rect.y + true_rect.height)
arrow_points[0] = wx.Point(rect.width / 2 + 3, rect.height / 2)
arrow_points[1] = arrow_points[0] - wx.Point(5, -5)
arrow_points[2] = arrow_points[0] - wx.Point(5, 5)
elif result == RIBBON_SCROLL_BTN_DOWN:
dc.DrawLine(true_rect.x, true_rect.y, true_rect.x + true_rect.width, true_rect.y)
arrow_points[0] = wx.Point(rect.width / 2, rect.height / 2 + 3)
arrow_points[1] = arrow_points[0] - wx.Point( 5, 5)
arrow_points[2] = arrow_points[0] - wx.Point(-5, 5)
elif result == RIBBON_SCROLL_BTN_UP:
dc.DrawLine(true_rect.x, true_rect.GetBottom(), true_rect.x + true_rect.width, true_rect.GetBottom())
arrow_points[0] = wx.Point(rect.width / 2, rect.height / 2 - 2)
arrow_points[1] = arrow_points[0] + wx.Point( 5, 5)
arrow_points[2] = arrow_points[0] + wx.Point(-5, 5)
else:
return
x = rect.x
y = rect.y
if style & RIBBON_SCROLL_BTN_ACTIVE:
x += 1
y += 1
dc.SetPen(wx.TRANSPARENT_PEN)
B = wx.Brush(self._tab_label_colour)
dc.SetBrush(B)
dc.DrawPolygon(arrow_points, x, y)
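    # Illustrative sketch (not part of the original module): composing a style
    # for DrawScrollButton() from the module flags used above -- a left-pointing
    # button for the tab area, drawn in its active (pressed) state.  The helper
    # is hypothetical; `dc`, `wnd` and `rect` come from the caller.
    def _example_draw_tab_scroll_left(self, dc, wnd, rect):
        style = (RIBBON_SCROLL_BTN_LEFT |
                 RIBBON_SCROLL_BTN_FOR_TABS |
                 RIBBON_SCROLL_BTN_ACTIVE)
        self.DrawScrollButton(dc, wnd, rect, style)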
def GetPanelSize(self, dc, wnd, client_size, client_offset=None):
"""
Calculate the size of a panel for a given client size.
This should increment the given size by enough to fit the panel label and other
chrome.
:param `dc`: A device context to use if one is required for size calculations;
:param `wnd`: The ribbon panel in question;
:param `client_size`: The client size;
:param `client_offset`: The offset where the client rectangle begins within
the panel (may be ``None``).
:see: L{GetPanelClientSize}
"""
dc.SetFont(self._panel_label_font)
label_size = wx.Size(*dc.GetTextExtent(wnd.GetLabel()))
label_height = label_size.GetHeight() + 5
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
client_size.IncBy(4, label_height + 6)
if client_offset is not None:
client_offset = wx.Point(2, label_height + 3)
else:
client_size.IncBy(6, label_height + 4)
if client_offset is not None:
client_offset = wx.Point(3, label_height + 2)
return client_size
def GetPanelClientSize(self, dc, wnd, size, client_offset=None):
"""
Calculate the client size of a panel for a given overall size.
This should act as the inverse to L{GetPanelSize}, and decrement the given size
by enough to fit the panel label and other chrome.
:param `dc`: A device context to use if one is required for size calculations;
:param `wnd`: The ribbon panel in question;
:param `size`: The overall size to calculate client size for;
        :param `client_offset`: The offset where the returned client size begins within
         the given size (may be ``None``).
:see: L{GetPanelSize}
"""
dc.SetFont(self._panel_label_font)
label_size = wx.Size(*dc.GetTextExtent(wnd.GetLabel()))
label_height = label_size.GetHeight() + 5
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
size.DecBy(4, label_height + 6)
if client_offset is not None:
client_offset = wx.Point(2, label_height + 3)
else:
size.DecBy(6, label_height + 4)
if client_offset is not None:
client_offset = wx.Point(3, label_height + 2)
return size, client_offset
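    # Illustrative sketch (not part of the original module): GetPanelSize() and
    # GetPanelClientSize() are intended to be inverses, so a round trip should
    # recover the original client size.  The helper is hypothetical; `dc` and
    # `panel` are assumed to be a device context and a ribbon panel.
    def _example_panel_size_round_trip(self, dc, panel, client):
        overall = self.GetPanelSize(dc, panel, wx.Size(*client))
        back, offset = self.GetPanelClientSize(dc, panel, wx.Size(*overall))
        assert (back.GetWidth(), back.GetHeight()) == \
               (client.GetWidth(), client.GetHeight())
        return overall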
def DrawPanelBackground(self, dc, wnd, rect):
"""
Draw the background and chrome for a ribbon panel.
This should draw the border, background, label, and any other items of a panel
which are outside the client area of a panel. Note that when a panel is
minimised, this function is not called - only L{DrawMinimisedPanel} is called,
so a background should be explicitly painted by that if required.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto, which is always the panel
whose background and chrome is being drawn. The panel label and other panel
attributes can be obtained by querying this;
:param `rect`: The rectangle within which to draw.
"""
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
true_rect = wx.Rect(*rect)
true_rect = self.RemovePanelPadding(true_rect)
dc.SetPen(self._panel_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(true_rect.x, true_rect.y, true_rect.width, true_rect.height)
true_rect.x += 1
true_rect.width -= 2
true_rect.y += 1
dc.SetFont(self._panel_label_font)
label_size = wx.Size(*dc.GetTextExtent(wnd.GetLabel()))
label_height = label_size.GetHeight() + 5
label_rect = wx.Rect(*true_rect)
label_rect.height = label_height - 1
dc.DrawLine(label_rect.x, label_rect.y + label_rect.height, label_rect.x + label_rect.width, label_rect.y + label_rect.height)
label_bg_colour = self._panel_label_background_colour
label_bg_grad_colour = self._panel_label_background_gradient_colour
if wnd.IsHovered():
label_bg_colour = self._panel_hover_label_background_colour
label_bg_grad_colour = self._panel_hover_label_background_gradient_colour
dc.SetTextForeground(self._panel_hover_label_colour)
else:
dc.SetTextForeground(self._panel_label_colour)
if wx.Platform == "__WXMAC__":
dc.GradientFillLinear(label_rect, label_bg_grad_colour, label_bg_colour, wx.SOUTH)
else:
dc.GradientFillLinear(label_rect, label_bg_colour, label_bg_grad_colour, wx.SOUTH)
dc.SetFont(self._panel_label_font)
dc.DrawText(wnd.GetLabel(), label_rect.x + 3, label_rect.y + 2)
if wnd.IsHovered():
gradient_rect = wx.Rect(*true_rect)
gradient_rect.y += label_rect.height + 1
gradient_rect.height = true_rect.height - label_rect.height - 3
if wx.Platform == "__WXMAC__":
colour = self._page_hover_background_gradient_colour
gradient = self._page_hover_background_colour
else:
colour = self._page_hover_background_colour
gradient = self._page_hover_background_gradient_colour
dc.GradientFillLinear(gradient_rect, colour, gradient, wx.SOUTH)
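    # Illustrative sketch (not part of the original module): DrawPanelBackground()
    # above swaps the gradient stops on Mac; this hypothetical helper captures
    # that convention in one place.  It is not used by the provider itself.
    def _example_platform_gradient(self, dc, rect, colour, gradient):
        if wx.Platform == "__WXMAC__":
            colour, gradient = gradient, colour
        dc.GradientFillLinear(rect, colour, gradient, wx.SOUTH)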
def DrawMinimisedPanel(self, dc, wnd, rect, bitmap):
"""
Draw a minimised ribbon panel.
:param `dc`: The device context to draw onto;
        :param `wnd`: The window which is being drawn onto, which is always the panel
         which is minimised. The panel label can be obtained from this window. The
         minimised icon obtained from querying the window may not be the size requested
         by L{RibbonMSWArtProvider.GetMinimisedPanelMinimumSize} - the `bitmap` argument
         contains the icon in the requested size;
:param `rect`: The rectangle within which to draw. The size of the rectangle
will be at least the size returned by L{RibbonMSWArtProvider.GetMinimisedPanelMinimumSize};
:param `bitmap`: A copy of the panel's minimised bitmap rescaled to the size
returned by L{RibbonMSWArtProvider.GetMinimisedPanelMinimumSize}.
"""
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
true_rect = wx.Rect(*rect)
true_rect = self.RemovePanelPadding(true_rect)
dc.SetPen(self._panel_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(true_rect.x, true_rect.y, true_rect.width, true_rect.height)
true_rect.Deflate(1, 1)
if wnd.IsHovered() or wnd.GetExpandedPanel():
colour = self._page_hover_background_colour
gradient = self._page_hover_background_gradient_colour
if (wx.Platform == "__WXMAC__" and not wnd.GetExpandedPanel()) or \
(wx.Platform != "__WXMAC__" and wnd.GetExpandedPanel()):
temp = colour
colour = gradient
gradient = temp
dc.GradientFillLinear(true_rect, colour, gradient, wx.SOUTH)
preview = self.DrawMinimisedPanelCommon(dc, wnd, true_rect)
dc.SetPen(self._panel_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(preview.x, preview.y, preview.width, preview.height)
preview.Deflate(1, 1)
preview_caption_rect = wx.Rect(*preview)
preview_caption_rect.height = 7
preview.y += preview_caption_rect.height
preview.height -= preview_caption_rect.height
if wx.Platform == "__WXMAC__":
dc.GradientFillLinear(preview_caption_rect, self._panel_hover_label_background_gradient_colour,
self._panel_hover_label_background_colour, wx.SOUTH)
dc.GradientFillLinear(preview, self._page_hover_background_gradient_colour,
self._page_hover_background_colour, wx.SOUTH)
else:
dc.GradientFillLinear(preview_caption_rect, self._panel_hover_label_background_colour,
self._panel_hover_label_background_gradient_colour, wx.SOUTH)
dc.GradientFillLinear(preview, self._page_hover_background_colour,
self._page_hover_background_gradient_colour, wx.SOUTH)
if bitmap.IsOk():
dc.DrawBitmap(bitmap, preview.x + (preview.width - bitmap.GetWidth()) / 2,
preview.y + (preview.height - bitmap.GetHeight()) / 2, True)
def DrawPartialPanelBackground(self, dc, wnd, rect):
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
offset = wx.Point(*wnd.GetPosition())
parent = wnd.GetParent()
panel = None
        # Walk up the window hierarchy looking for the containing RibbonPanel,
        # accumulating the offset of `wnd` within it along the way.
        while parent is not None:
            panel = parent
            if isinstance(panel, PANEL.RibbonPanel):
                if not panel.IsHovered():
                    return
                break
            offset += parent.GetPosition()
            parent = panel.GetParent()
        else:
            # No RibbonPanel ancestor was found
            panel = None
        if panel is None:
            return
background = wx.Rect(0, 0, *panel.GetSize())
background = self.RemovePanelPadding(background)
background.x += 1
background.width -= 2
dc.SetFont(self._panel_label_font)
caption_height = dc.GetTextExtent(panel.GetLabel())[1] + 7
background.y += caption_height - 1
background.height -= caption_height
paint_rect = wx.Rect(*rect)
paint_rect.x += offset.x
paint_rect.y += offset.y
if wx.Platform == "__WXMAC__":
bg_grad_clr = self._page_hover_background_colour
bg_clr = self._page_hover_background_gradient_colour
else:
bg_clr = self._page_hover_background_colour
bg_grad_clr = self._page_hover_background_gradient_colour
paint_rect.Intersect(background)
if not paint_rect.IsEmpty():
starting_colour = RibbonInterpolateColour(bg_clr, bg_grad_clr, paint_rect.y, background.y, background.y + background.height)
ending_colour = RibbonInterpolateColour(bg_clr, bg_grad_clr, paint_rect.y + paint_rect.height, background.y, background.y + background.height)
paint_rect.x -= offset.x
paint_rect.y -= offset.y
dc.GradientFillLinear(paint_rect, starting_colour, ending_colour, wx.SOUTH)
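    # Illustrative sketch (not part of the original module): a minimal linear
    # interpolation between two colours, approximating what the module-level
    # RibbonInterpolateColour() used above does for the clipped gradient.  The
    # real helper lives elsewhere in this module; this version is a reference
    # sketch only.
    @staticmethod
    def _example_interpolate_colour(start, end, position, lo, hi):
        if hi <= lo or position <= lo:
            return start
        if position >= hi:
            return end
        t = float(position - lo) / float(hi - lo)
        mix = lambda a, b: int(a + (b - a) * t)
        return wx.Colour(mix(start.Red(), end.Red()),
                         mix(start.Green(), end.Green()),
                         mix(start.Blue(), end.Blue()))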
def DrawGalleryBackground(self, dc, wnd, rect):
"""
Draw the background and chrome for a L{RibbonGallery} control.
        This should draw the border, background, scroll buttons, extension button, and
any other UI elements which are not attached to a specific gallery item.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto, which is always the gallery
whose background and chrome is being drawn. Attributes used during drawing like
the gallery hover state and individual button states can be queried from this
parameter by L{RibbonGallery.IsHovered}, L{RibbonGallery.GetExtensionButtonState},
L{RibbonGallery.GetUpButtonState}, and L{RibbonGallery.GetDownButtonState};
:param `rect`: The rectangle within which to draw. This rectangle is the entire
area of the gallery control, not just the client rectangle.
"""
self.DrawPartialPanelBackground(dc, wnd, rect)
if wnd.IsHovered():
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._gallery_hover_background_brush)
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
dc.DrawRectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height - 16)
else:
dc.DrawRectangle(rect.x + 1, rect.y + 1, rect.width - 16, rect.height - 2)
dc.SetPen(self._gallery_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
self.DrawGalleryBackgroundCommon(dc, wnd, rect)
def DrawGalleryButton(self, dc, rect, state, bitmaps):
extra_height = 0
extra_width = 0
reduced_rect = wx.Rect(*rect)
reduced_rect.Deflate(1, 1)
if self._flags & RIBBON_BAR_FLOW_VERTICAL:
reduced_rect.width += 1
extra_width = 1
else:
reduced_rect.height += 1
extra_height = 1
if state == RIBBON_GALLERY_BUTTON_NORMAL:
dc.GradientFillLinear(reduced_rect, self._gallery_button_background_colour, self._gallery_button_background_gradient_colour, wx.SOUTH)
btn_bitmap = bitmaps[0]
elif state == RIBBON_GALLERY_BUTTON_HOVERED:
dc.SetPen(self._gallery_item_border_pen)
dc.SetBrush(self._gallery_button_hover_background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width + extra_width, rect.height + extra_height)
btn_bitmap = bitmaps[1]
elif state == RIBBON_GALLERY_BUTTON_ACTIVE:
dc.SetPen(self._gallery_item_border_pen)
dc.SetBrush(self._gallery_button_active_background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width + extra_width, rect.height + extra_height)
btn_bitmap = bitmaps[2]
elif state == RIBBON_GALLERY_BUTTON_DISABLED:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._gallery_button_disabled_background_brush)
dc.DrawRectangle(reduced_rect.x, reduced_rect.y, reduced_rect.width, reduced_rect.height)
btn_bitmap = bitmaps[3]
dc.DrawBitmap(btn_bitmap, reduced_rect.x + reduced_rect.width / 2 - 2, (rect.y + rect.height / 2) - 2, True)
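    # Illustrative sketch (not part of the original module): the state-to-bitmap
    # mapping implied by DrawGalleryButton() above, written out as a lookup
    # table.  The flag names are the real module constants; the attribute itself
    # is hypothetical and unused.
    _EXAMPLE_GALLERY_BITMAP_INDEX = {
        RIBBON_GALLERY_BUTTON_NORMAL: 0,
        RIBBON_GALLERY_BUTTON_HOVERED: 1,
        RIBBON_GALLERY_BUTTON_ACTIVE: 2,
        RIBBON_GALLERY_BUTTON_DISABLED: 3,
    }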
def DrawGalleryItemBackground(self, dc, wnd, rect, item):
"""
Draw the background of a single item in a L{RibbonGallery} control.
        This is painted on top of a gallery background, and behind the item's bitmap.
Unlike L{DrawButtonBarButton} and L{DrawTool}, it is not expected to draw the
item bitmap - that is done by the gallery control itself.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto, which is always the gallery
which contains the item being drawn;
:param `rect`: The rectangle within which to draw. The size of this rectangle
will be the size of the item's bitmap, expanded by gallery item padding values
(``RIBBON_ART_GALLERY_BITMAP_PADDING_LEFT_SIZE``, ``RIBBON_ART_GALLERY_BITMAP_PADDING_RIGHT_SIZE``,
``RIBBON_ART_GALLERY_BITMAP_PADDING_TOP_SIZE``, and ``RIBBON_ART_GALLERY_BITMAP_PADDING_BOTTOM_SIZE``).
The drawing rectangle will be entirely within a rectangle on the same device
context previously painted with L{DrawGalleryBackground};
:param `item`: The item whose background is being painted. Typically the
background will vary if the item is hovered, active, or selected;
L{RibbonGallery.GetSelection}, L{RibbonGallery.GetActiveItem}, and
L{RibbonGallery.GetHoveredItem} can be called to test if the given item is in one of these states.
"""
if wnd.GetHoveredItem() != item and wnd.GetActiveItem() != item and wnd.GetSelection() != item:
return
dc.SetPen(self._gallery_item_border_pen)
if wnd.GetActiveItem() == item or wnd.GetSelection() == item:
dc.SetBrush(self._gallery_button_active_background_brush)
else:
dc.SetBrush(self._gallery_button_hover_background_brush)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
def DrawButtonBarBackground(self, dc, wnd, rect):
"""
Draw the background for a L{bar.RibbonButtonBar} control.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto (which will typically
be the button bar itself, though this is not guaranteed);
:param `rect`: The rectangle within which to draw.
"""
self.DrawPartialPanelBackground(dc, wnd, rect)
def DrawButtonBarButton(self, dc, wnd, rect, kind, state, label, bitmap_large, bitmap_small):
"""
Draw a single button for a L{bar.RibbonButtonBar} control.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw. The size of this rectangle
will be a size previously returned by L{RibbonMSWArtProvider.GetButtonBarButtonSize}, and the
rectangle will be entirely within a rectangle on the same device context
previously painted with L{DrawButtonBarBackground};
:param `kind`: The kind of button to draw (normal, dropdown or hybrid);
:param `state`: Combination of a size flag and state flags from the
`RibbonButtonBarButtonState` enumeration;
:param `label`: The label of the button;
        :param `bitmap_large`: The large bitmap of the button (or the large disabled
         bitmap when ``RIBBON_BUTTONBAR_BUTTON_DISABLED`` is set in `state`);
        :param `bitmap_small`: The small bitmap of the button (or the small disabled
         bitmap when ``RIBBON_BUTTONBAR_BUTTON_DISABLED`` is set in `state`).
"""
if state & (RIBBON_BUTTONBAR_BUTTON_HOVER_MASK | RIBBON_BUTTONBAR_BUTTON_ACTIVE_MASK):
dc.SetPen(self._button_bar_hover_border_pen)
bg_rect = wx.Rect(*rect)
bg_rect.Deflate(1, 1)
if kind == RIBBON_BUTTON_HYBRID:
result = state & RIBBON_BUTTONBAR_BUTTON_SIZE_MASK
if result == RIBBON_BUTTONBAR_BUTTON_LARGE:
iYBorder = rect.y + bitmap_large.GetHeight() + 4
partial_bg = wx.Rect(*rect)
if state & RIBBON_BUTTONBAR_BUTTON_NORMAL_HOVERED:
partial_bg.SetBottom(iYBorder - 1)
else:
partial_bg.height -= (iYBorder - partial_bg.y + 1)
partial_bg.y = iYBorder + 1
dc.DrawLine(rect.x, iYBorder, rect.x + rect.width, iYBorder)
bg_rect.Intersect(partial_bg)
elif result == RIBBON_BUTTONBAR_BUTTON_MEDIUM:
iArrowWidth = 9
if state & RIBBON_BUTTONBAR_BUTTON_NORMAL_HOVERED:
bg_rect.width -= iArrowWidth
dc.DrawLine(bg_rect.x + bg_rect.width, rect.y, bg_rect.x + bg_rect.width, rect.y + rect.height)
else:
iArrowWidth -= 1
bg_rect.x += bg_rect.width - iArrowWidth
bg_rect.width = iArrowWidth
dc.DrawLine(bg_rect.x - 1, rect.y, bg_rect.x - 1, rect.y + rect.height)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
dc.SetPen(wx.TRANSPARENT_PEN)
if state & RIBBON_BUTTONBAR_BUTTON_ACTIVE_MASK:
dc.SetBrush(self._button_bar_active_background_brush)
else:
dc.SetBrush(self._button_bar_hover_background_brush)
dc.DrawRectangle(bg_rect.x, bg_rect.y, bg_rect.width, bg_rect.height)
dc.SetFont(self._button_bar_label_font)
dc.SetTextForeground(self._button_bar_label_colour)
self.DrawButtonBarButtonForeground(dc, rect, kind, state, label, bitmap_large, bitmap_small)
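    # Illustrative sketch (not part of the original module): the hover/active
    # brush selection used by DrawButtonBarButton() above, factored out.  The
    # attribute and flag names are the real ones; the helper is hypothetical.
    def _example_button_bar_brush(self, state):
        if state & RIBBON_BUTTONBAR_BUTTON_ACTIVE_MASK:
            return self._button_bar_active_background_brush
        return self._button_bar_hover_background_brush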
def DrawToolBarBackground(self, dc, wnd, rect):
"""
Draw the background for a L{RibbonToolBar} control.
:param `dc`: The device context to draw onto;
        :param `wnd`: The window which is being drawn onto. In most cases this will be
         a L{RibbonToolBar}, but it doesn't have to be;
:param `rect`: The rectangle within which to draw. Some of this rectangle
will later be drawn over using L{DrawToolGroupBackground} and L{DrawTool},
but not all of it will (unless there is only a single group of tools).
"""
self.DrawPartialPanelBackground(dc, wnd, rect)
def DrawToolGroupBackground(self, dc, wnd, rect):
"""
Draw the background for a group of tools on a L{RibbonToolBar} control.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto. In most cases this will
be a L{RibbonToolBar}, but it doesn't have to be;
:param `rect`: The rectangle within which to draw. This rectangle is a union
of the individual tools' rectangles. As there are no gaps between tools, this
rectangle will be painted over exactly once by calls to L{DrawTool}. The
group background could therefore be painted by L{DrawTool}, though it can be
conceptually easier and more efficient to draw it all at once here. The
rectangle will be entirely within a rectangle on the same device context
previously painted with L{DrawToolBarBackground}.
"""
dc.SetPen(self._toolbar_border_pen)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
bg_rect = wx.Rect(*rect)
bg_rect.Deflate(1, 1)
dc.GradientFillLinear(bg_rect, self._tool_background_colour, self._tool_background_gradient_colour, wx.SOUTH)
def DrawTool(self, dc, wnd, rect, bitmap, kind, state):
"""
Draw a single tool (for a L{RibbonToolBar} control).
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto. In most cases this will
be a L{RibbonToolBar}, but it doesn't have to be;
:param `rect`: The rectangle within which to draw. The size of this rectangle
         will be at least the size returned by L{RibbonMSWArtProvider.GetToolSize}, and the height of it will
be equal for all tools within the same group. The rectangle will be entirely
within a rectangle on the same device context previously painted with
L{DrawToolGroupBackground};
:param `bitmap`: The bitmap to use as the tool's foreground. If the tool is a
hybrid or dropdown tool, then the foreground should also contain a standard
dropdown button;
:param `kind`: The kind of tool to draw (normal, dropdown, or hybrid);
        :param `state`: A combination of `RibbonToolBarToolState` flags giving the
         state of the tool and its relative position within a tool group.
"""
bg_rect = wx.Rect(*rect)
bg_rect.Deflate(1, 1)
if state & RIBBON_TOOLBAR_TOOL_LAST == 0:
bg_rect.width += 1
is_custom_bg = (state & (RIBBON_TOOLBAR_TOOL_HOVER_MASK | RIBBON_TOOLBAR_TOOL_ACTIVE_MASK)) != 0
is_split_hybrid = kind == RIBBON_BUTTON_HYBRID and is_custom_bg
# Background
if is_custom_bg:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self._tool_hover_background_brush)
dc.DrawRectangle(bg_rect.x, bg_rect.y, bg_rect.width, bg_rect.height)
if state & RIBBON_TOOLBAR_TOOL_ACTIVE_MASK:
active_rect = wx.Rect(*bg_rect)
if kind == RIBBON_BUTTON_HYBRID:
active_rect.width -= 8
if state & RIBBON_TOOLBAR_TOOL_DROPDOWN_ACTIVE:
active_rect.x += active_rect.width
active_rect.width = 8
dc.SetBrush(self._tool_active_background_brush)
dc.DrawRectangle(active_rect.x, active_rect.y, active_rect.width, active_rect.height)
# Border
if is_custom_bg:
dc.SetPen(self._toolbar_hover_borden_pen)
else:
dc.SetPen(self._toolbar_border_pen)
if state & RIBBON_TOOLBAR_TOOL_FIRST == 0:
existing = dc.GetPixel(rect.x, rect.y + 1)
if existing == wx.NullColour or existing != self._toolbar_hover_borden_pen.GetColour():
dc.DrawLine(rect.x, rect.y + 1, rect.x, rect.y + rect.height - 1)
if is_custom_bg:
border_rect = wx.Rect(*bg_rect)
border_rect.Inflate(1, 1)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(border_rect.x, border_rect.y, border_rect.width, border_rect.height)
# Foreground
avail_width = bg_rect.GetWidth()
if kind != RIBBON_BUTTON_NORMAL:
avail_width -= 8
if is_split_hybrid:
dc.DrawLine(rect.x + avail_width + 1, rect.y, rect.x + avail_width + 1, rect.y + rect.height)
dc.DrawBitmap(self._toolbar_drop_bitmap, bg_rect.x + avail_width + 2, bg_rect.y + (bg_rect.height / 2) - 2, True)
dc.DrawBitmap(bitmap, bg_rect.x + (avail_width - bitmap.GetWidth()) / 2, bg_rect.y + (bg_rect.height - bitmap.GetHeight()) / 2, True)
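# Illustrative usage sketch (not part of the original module).  Assuming this
# art provider is exported by wx.lib.agw.ribbon (the class name is not shown in
# this excerpt, so `TheArtProvider` below is a placeholder), a minimal harness
# would look like:
#
#     import wx
#     from wx.lib.agw.ribbon import bar as RB
#
#     app = wx.App(False)
#     frame = wx.Frame(None, title="Ribbon art demo")
#     ribbon = RB.RibbonBar(frame, wx.ID_ANY)
#     ribbon.SetArtProvider(TheArtProvider())   # placeholder class name
#     ribbon.Realize()
#     frame.Show()
#     app.MainLoop()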
| agpl-3.0 | -4,951,443,095,080,023,000 | 45.146766 | 154 | 0.620721 | false |