hexsha (stringlengths 40–40) | size (int64 5–2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3–248) | max_stars_repo_name (stringlengths 5–125) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–248) | max_issues_repo_name (stringlengths 5–125) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–248) | max_forks_repo_name (stringlengths 5–125) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 5–2.06M) | avg_line_length (float64 1–1.02M) | max_line_length (int64 3–1.03M) | alphanum_fraction (float64 0–1) | count_classes (int64 0–1.6M) | score_classes (float64 0–1) | count_generators (int64 0–651k) | score_generators (float64 0–1) | count_decorators (int64 0–990k) | score_decorators (float64 0–1) | count_async_functions (int64 0–235k) | score_async_functions (float64 0–1) | count_documentation (int64 0–1.04M) | score_documentation (float64 0–1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4013520787f6cc9bbf08df7635faa9848889aff8 | 13,500 | py | Python | onemsdk/parser/tag.py | mvnm/onemsdk | d6293c632d15af3b044f130343899d3b242e287a | ["MIT"] | null | null | null | onemsdk/parser/tag.py | mvnm/onemsdk | d6293c632d15af3b044f130343899d3b242e287a | ["MIT"] | 6 | 2019-07-05T07:54:03.000Z | 2019-09-30T10:47:10.000Z | onemsdk/parser/tag.py | mvnm/onemsdk | d6293c632d15af3b044f130343899d3b242e287a | ["MIT"] | 2 | 2019-08-30T07:36:48.000Z | 2020-01-13T01:40:06.000Z |
import inspect
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Union, Type, Optional, Dict, Any
from pydantic import BaseModel
from onemsdk.exceptions import NodeTagMismatchException, ONEmSDKException
from .node import Node
__all__ = ['Tag', 'HeaderTag', 'FooterTag', 'BrTag', 'UlTag', 'LiTag', 'FormTag',
'SectionTag', 'InputTagAttrs', 'InputTag', 'FormTagAttrs', 'PTag', 'ATag',
'ATagAttrs', 'get_tag_cls', 'SectionTagAttrs', 'LiTagAttrs', 'InputTagType']
class Tag(BaseModel, ABC):
class Config:
tag_name: str = None
attrs: Any = None
children: List[Union['Tag', str]] = []
@abstractmethod
def render(self) -> str:
pass
@classmethod
def from_node(cls, node: Node) -> 'Tag':
if node.tag != cls.Config.tag_name:
raise NodeTagMismatchException(
f'Expected tag <{cls.Config.tag_name}>, received <{node.tag}>')
attrs = cls.get_attrs(node)
children = []
for node_child in node.children:
if isinstance(node_child, str):
children.append(node_child)
else:
child_tag_cls = get_tag_cls(node_child.tag)
children.append(child_tag_cls.from_node(node_child))
return cls(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return None
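# Usage sketch for the recursive Node -> Tag conversion above (added for
# illustration, not part of the original module; how the Node is obtained is
# an assumption here, since its parser lives in the .node module):
#
#     node = ...                       # a parsed <header>Hi</header> Node
#     tag = HeaderTag.from_node(node)  # children dispatched via get_tag_cls()
#     tag.render()                     # -> 'Hi'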
class HeaderTag(Tag):
class Config:
tag_name = 'header'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<header> must have max 1 text child')
super(HeaderTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return None
HeaderTag.update_forward_refs()
class FooterTag(Tag):
class Config:
tag_name = 'footer'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<footer> must have max 1 text child')
super(FooterTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return None
FooterTag.update_forward_refs()
class InputTagType(str, Enum):
# standard HTML5 input values
text = 'text'
date = 'date'
number = 'number'
hidden = 'hidden'
email = 'email'
url = 'url'
# not standard
datetime = 'datetime'
location = 'location'
class InputTagAttrs(BaseModel):
# standard HTML5 attributes
type: InputTagType
min: Union[int, float] = None
minlength: int = None
max: Union[int, float] = None
maxlength: int = None
step: int = None
value: str = None # only for type="hidden"
pattern: str = None
# not standard
min_error: str = None
minlength_error: str = None
max_error: str = None
maxlength_error: str = None
class InputTag(Tag):
class Config:
tag_name = 'input'
attrs: InputTagAttrs
def __init__(self, attrs: InputTagAttrs, **data):
super(InputTag, self).__init__(attrs=attrs)
@classmethod
def get_attrs(cls, node: Node):
return InputTagAttrs(
type=node.attrs.get('type'),
min=node.attrs.get('min'),
min_error=node.attrs.get('min-error'),
minlength=node.attrs.get('minlength'),
minlength_error=node.attrs.get('minlength-error'),
max=node.attrs.get('max'),
max_error=node.attrs.get('max-error'),
maxlength=node.attrs.get('maxlength'),
maxlength_error=node.attrs.get('maxlength-error'),
step=node.attrs.get('step'),
value=node.attrs.get('value'),
pattern=node.attrs.get('pattern'),
)
def render(self):
return ''
def data(self) -> Optional[Dict[str, str]]:
return None
InputTag.update_forward_refs()
class LabelTag(Tag):
class Config:
tag_name = 'label'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<label> must have max 1 text child')
super(LabelTag, self).__init__(children=children)
def render(self):
return self.children[0]
LabelTag.update_forward_refs()
class ATagAttrs(BaseModel):
href: str
method: Optional[str] = 'GET'
class ATag(Tag):
class Config:
tag_name: str = 'a'
attrs: ATagAttrs
def __init__(self, attrs: ATagAttrs, children: List[str]):
if len(children) != 1 or not isinstance(children[0], str):
raise ONEmSDKException('<a> must have 1 text child')
super(ATag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node) -> ATagAttrs:
return ATagAttrs(href=node.attrs.get('href'),
method=node.attrs.get('method') or 'GET')
def render(self):
return self.children[0]
def data(self) -> Dict[str, str]:
return {
**self.attrs.dict(),
'text': self.children[0]
}
ATag.update_forward_refs()
class LiTagAttrs(BaseModel):
value: Optional[str]
text_search: Optional[str]
class LiTag(Tag):
class Config:
tag_name = 'li'
attrs: LiTagAttrs
def __init__(self, children: List[Union[ATag, str]], attrs: LiTagAttrs = None):
if len(children) != 1 or not isinstance(children[0], (str, ATag)):
raise ONEmSDKException('<li> must have 1 (text or <a>) child')
if attrs is None:
attrs = LiTagAttrs()
super(LiTag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return LiTagAttrs(
value=node.attrs.get('value'),
text_search=node.attrs.get('text-search'),
)
def render(self):
if isinstance(self.children[0], ATag):
return self.children[0].render()
return self.children[0]
LiTag.update_forward_refs()
class UlTag(Tag):
class Config:
tag_name = 'ul'
def __init__(self, children: List[LiTag], **data):
if not children or not isinstance(children[0], LiTag):
raise ONEmSDKException('<ul> must have min 1 <li> child')
super(UlTag, self).__init__(children=children)
def render(self):
return '\n'.join([child.render() for child in self.children])
UlTag.update_forward_refs()
class PTag(Tag):
class Config:
tag_name = 'p'
def __init__(self, children: List[str] = None, **data):
children = children or []
if len(children) > 1 or children and not isinstance(children[0], str):
raise ONEmSDKException('<p> must have max 1 text child')
super(PTag, self).__init__(children=children)
def render(self):
if len(self.children) == 1:
return self.children[0]
return ''
def data(self):
return {
'text': self.children[0],
'href': None,
'data': None
}
PTag.update_forward_refs()
class BrTag(Tag):
class Config:
tag_name = 'br'
def __init__(self, **data):
super(BrTag, self).__init__()
def render(self):
return '\n'
def data(self):
return {
'text': '\n',
'data': None,
'href': None
}
BrTag.update_forward_refs()
class SectionTagAttrs(BaseModel):
header: Optional[str]
footer: Optional[str]
name: Optional[str]
auto_select: bool = False
multi_select: bool = False
numbered: bool = False
chunking_footer: Optional[str]
confirmation_label: Optional[str]
method: Optional[str]
required: Optional[bool]
status_exclude: Optional[bool]
status_prepend: Optional[bool]
url: Optional[str]
validate_type_error: Optional[str]
validate_type_error_footer: Optional[str]
validate_url: Optional[str]
class SectionTag(Tag):
class Config:
tag_name = 'section'
attrs: SectionTagAttrs
def __init__(self, attrs: SectionTagAttrs = None, children: List = None):
children = children or []
allowed_children = (FooterTag, HeaderTag, UlTag, PTag,
InputTag, LabelTag, BrTag, str)
for child in children:
if not isinstance(child, allowed_children):
raise ONEmSDKException(
f'<{child.Config.tag_name}> cannot be child for <section>')
super(SectionTag, self).__init__(attrs=attrs, children=children)
def render(self, exclude_header: bool = False, exclude_footer: bool = False):
# Add a temporary \n for help
rendered_children = ['\n']
for child in self.children:
if isinstance(child, HeaderTag) and exclude_header:
# Do not include header
continue
if isinstance(child, FooterTag) and exclude_footer:
# Do not include footer
continue
if isinstance(child, str):
text = child
else:
text = child.render()
if text:
if isinstance(child, PTag) or isinstance(child, UlTag):
if rendered_children[-1] != '\n':
rendered_children.append('\n')
rendered_children.append(text)
rendered_children.append('\n')
else:
rendered_children.append(text)
# Remove the temporary \n
del rendered_children[0]
if rendered_children and rendered_children[-1] == '\n':
del rendered_children[-1]
return ''.join(rendered_children)
@classmethod
def get_attrs(cls, node: Node) -> SectionTagAttrs:
return SectionTagAttrs(
header=node.attrs.get('header'),
footer=node.attrs.get('footer'),
name=node.attrs.get('name'),
# Note that boolean attributes in HTML are evaluated to True if they are
# present (their actual value does not matter). They are evaluated to False
# only when they are missing
auto_select='auto-select' in node.attrs,
multi_select='multi-select' in node.attrs,
numbered='numbered' in node.attrs,
chunking_footer=node.attrs.get('chunking-footer'),
confirmation_label=node.attrs.get('confirmation-label'),
method=node.attrs.get('method'),
required='required' in node.attrs,
status_exclude='status-exclude' in node.attrs,
status_prepend='status-prepend' in node.attrs,
url=node.attrs.get('url'),
validate_type_error=node.attrs.get('validate-type-error'),
validate_type_error_footer=node.attrs.get('validate-type-error-footer'),
validate_url=node.attrs.get('validate-url'),
)
SectionTag.update_forward_refs()
class FormTagAttrs(BaseModel):
header: Optional[str]
footer: Optional[str]
action: str
method: str = 'POST'
completion_status_show: bool = False
completion_status_in_header: bool = False
skip_confirmation: bool = False
class FormTag(Tag):
class Config:
tag_name = 'form'
attrs: FormTagAttrs
children: List[SectionTag]
def __init__(self, attrs: FormTagAttrs, children: List[SectionTag]):
if not children:
raise ONEmSDKException('<form> must have at least 1 child')
for child in children:
if not isinstance(child, SectionTag):
raise ONEmSDKException('<form> can have only <section> children')
if not child.attrs.name:
raise ONEmSDKException('<form> can contain only named <section> tags. '
'Please add a unique "name" attribute in each form '
'section.')
super(FormTag, self).__init__(attrs=attrs, children=children)
@classmethod
def get_attrs(cls, node: Node):
return FormTagAttrs(
header=node.attrs.get('header'),
footer=node.attrs.get('footer'),
action=node.attrs.get('action'),
method=node.attrs.get('method') or 'POST',
completion_status_show='completion-status-show' in node.attrs,
completion_status_in_header='completion-status-in-header' in node.attrs,
skip_confirmation='skip-confirmation' in node.attrs,
)
def render(self):
return '\n'.join([child.render() for child in self.children])
FormTag.update_forward_refs()
_map_tag_cls = {}
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and issubclass(obj, Tag):
_map_tag_cls[obj.Config.tag_name] = obj
def get_tag_cls(tag_name: str) -> Type[Tag]:
global _map_tag_cls
try:
return _map_tag_cls[tag_name]
except KeyError:
raise ONEmSDKException(f'Tag <{tag_name}> is not supported')
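# Minimal self-check demo (added for illustration, not part of the original
# module; the values are hypothetical, but the classes and behaviour are the
# ones defined above). Runs only when the module is executed directly.
if __name__ == '__main__':
    assert get_tag_cls('header') is HeaderTag
    assert HeaderTag(children=['My menu']).render() == 'My menu'
    assert BrTag().render() == '\n'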
| 28.421053 | 91 | 0.605333 | 12,177 | 0.902 | 0 | 0 | 3,476 | 0.257481 | 0 | 0 | 1,864 | 0.138074 |
4014970fe4ab56a4d4e3af8b117a8432e328801e | 12,113 | py | Python | backend/syntax/rule.py | austinmarsray/Ccompiler | b3ef61283e33d06294c491b71586a945c38c6e54 | ["MIT"] | null | null | null | backend/syntax/rule.py | austinmarsray/Ccompiler | b3ef61283e33d06294c491b71586a945c38c6e54 | ["MIT"] | null | null | null | backend/syntax/rule.py | austinmarsray/Ccompiler | b3ef61283e33d06294c491b71586a945c38c6e54 | ["MIT"] | null | null | null |
class Sign:
"""
符号
"""
def __init__(self, sign_type, sign_str='', sign_line=-1):
"""
构造
:param sign_type: 符号的类型
:param sign_str: 符号的内容(可以为空)
:param sign_line: 符号所在行数(可以为空)
"""
self.type = sign_type
self.str = sign_str
self.line = sign_line
def is_terminal_sign(self):
"""
是不是终结符
:return: True/False
"""
if self.type == 'empty':
return True
else:
for i in terminal_sign_type:
if i == self.type:
return True
return False
def is_non_terminal_sign(self):
"""
是不是非终结符
:return: True/False
"""
for i in non_terminal_sign_type:
if i == self.type:
return True
return False
def is_empty_sign(self):
"""
是不是空字
:return: True/False
"""
return self.type == 'empty'
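# Examples (added for illustration; kept as comments because the
# terminal_sign_type / non_terminal_sign_type lists are defined further down
# in this module):
#
#     Sign('id').is_terminal_sign()           # True
#     Sign('program').is_non_terminal_sign()  # True
#     Sign('empty').is_empty_sign()           # True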
class Production:
"""
产生式
"""
def __init__(self, left_type, right_types):
"""
产生式左边
:param left_type: 产生式左边的符号类型
:param right_types: 产生式右边的符号类型列表
:param semantic_start: 语义操作关键字 - 开始
:param semantic_children: 语义操作关键字 - 孩子
:param semantic_end: 语义操作关键字 - 结束
"""
self.left = Sign(left_type)
self.right = list()
for i in right_types:
self.right.append(Sign(i))
        # For debugging: maps sign types to human-readable characters
SignToChar = {
'else': 'else',
'if': 'if',
'int': 'int',
'return': 'return',
'void': 'void',
'while': 'while',
'addition': '+',
'subtraction': '-',
'multiplication': '*',
'division': '/',
'bigger': '>',
'bigger-equal': '>=',
'smaller': '<',
'smaller-equal': '<=',
'equal': '==',
'not-equal': '!=',
'evaluate': '=',
'semicolon': ';',
'comma': ',',
'left-parentheses': '(',
'right-parentheses': ')',
'left-bracket': '[',
'right-bracket': ']',
'left-brace': '{',
'right-brace': '}',
'id': 'id',
'num': 'num',
'pound': '#'
}
self.str = self.left.type + ' ->'
if len(self.right) == 0:
self.str += 'ϵ'
else:
for i in self.right:
if i.is_non_terminal_sign():
self.str += ' ' + i.type
else:
self.str += ' ' + SignToChar[i.type]
"""
1. program -> define-list
2. define-list -> define define-list
| empty
3. define -> type ID define-type
4. define-type -> var-define-follow
| fun-define-follow
5. var-define-follow -> ;
| [ NUM ] ;
6. type -> int
| void
7. fun-define-follow -> ( params ) code-block
8. params -> param-list
| empty
9. param-list -> param param-follow
10. param-follow -> , param param-follow
| empty
11. param -> type ID array-subscript
12. array-subscript -> [ ]
| empty
13. code-block -> { local-define-list code-list }
14. local-define-list -> local-var-define local-define-list
| empty
15. local-var-define -> type ID var-define-follow
16. code-list -> code code-list
| empty
17. code -> normal-statement
| selection-statement
| iteration-statement
| return-statement
18. normal-statement -> ;
| ID normal-statement-follow
19. normal-statement-follow -> var-follow = expression ;
| call-follow ;
20. call-follow -> ( call-params )
21. call-params -> call-param-list
| empty
22. call-param-list -> expression call-param-follow
23. call-param-follow -> , expression call-param-follow
| empty
24. selection-statement -> if ( expression ) { code-list } selection-follow
25. selection-follow -> else { code-list }
| empty
26. iteration-statement -> while ( expression ) iteration-follow
27. iteration-follow -> { code-list }
| code
28. return-statement -> return return-follow
29. return-follow -> ;
| expression ;
30. var-follow -> [ expression ]
| empty
31. expression -> additive-expr expression-follow
32. expression-follow -> rel-op additive-expr
| empty
33. rel-op -> <=
| <
| >
| >=
| ==
| !=
34. additive-expr -> term additive-expr-follow
35. additive-expr-follow -> add-op term additive-expr-follow
| empty
36. add-op -> +
| -
37. term -> factor term-follow
38. term-follow -> mul-op factor term-follow
| empty
39. mul-op -> *
| /
40. factor -> ( expression )
| ID id-factor-follow | NUM
41. id-factor-follow -> var-follow
| ( args )
42. args -> arg-list
| empty
43. arg-list -> expression arg-list-follow
44. arg-list-follow -> , expression arg-list-follow
| empty
"""
# All terminal sign types
terminal_sign_type = [
'else',
'if',
'int',
'return',
'void',
'while',
'addition',
'subtraction',
'multiplication',
'division',
'bigger',
'bigger-equal',
'smaller',
'smaller-equal',
'equal',
'not-equal',
'evaluate',
'semicolon',
'comma',
'left-parentheses',
'right-parentheses',
'left-bracket',
'right-bracket',
'left-brace',
'right-brace',
'id',
'num',
    # Add new sign types before this line; be sure not to move 'pound'
'pound'
]
# All non-terminal sign types
non_terminal_sign_type = [
'program',
'define-list',
'define',
'define-type',
'var-define-follow',
'type',
'fun-define-follow',
'params',
'param-list',
'param-follow',
'param',
'array-subscript',
'code-block',
'local-define-list',
'local-var-define',
'code-list',
'code',
'normal-statement',
'normal-statement-follow',
'call-follow',
'call-params',
'call-param-list',
'call-param-follow',
'selection-statement',
'selection-follow',
'iteration-statement',
'iteration-follow',
'return-statement',
'return-follow',
# 'eval-statement',
# 'var',
'var-follow',
'expression',
'expression-follow',
'rel-op',
'additive-expr',
'additive-expr-follow',
'add-op',
'term',
'term-follow',
'mul-op',
'factor',
'id-factor-follow',
'args',
'arg-list',
'arg-list-follow'
]
# Grammar productions
productions = [
# 0
Production('program', ['define-list']),
# 1
Production('define-list', ['define', 'define-list']),
Production('define-list', []),
# 2
Production('define', ['type', 'id', 'define-type']),
# 3
Production('define-type', ['var-define-follow']),
Production('define-type', ['fun-define-follow']),
# 4
Production('var-define-follow', ['semicolon']),
Production('var-define-follow', ['left-bracket', 'num', 'right-bracket', 'semicolon']),
# 5
Production('type', ['int']),
Production('type', ['void']),
# 6
Production('fun-define-follow', ['left-parentheses', 'params', 'right-parentheses', 'code-block']),
# 7
Production('params', ['param-list']),
Production('params', []),
# 8
Production('param-list', ['param', 'param-follow']),
# 9
Production('param-follow', ['comma', 'param', 'param-follow']),
Production('param-follow', []),
# 10
Production('param', ['type', 'id', 'array-subscript']),
# 11
Production('array-subscript', ['left-bracket', 'right-bracket']),
Production('array-subscript', []),
# 12
Production('code-block', ['left-brace', 'local-define-list', 'code-list', 'right-brace']),
# 13
Production('local-define-list', ['local-var-define', 'local-define-list']),
Production('local-define-list', []),
# 14
Production('local-var-define', ['type', 'id', 'var-define-follow']),
# 15
Production('code-list', ['code', 'code-list']),
Production('code-list', []),
# 16
Production('code', ['normal-statement']),
Production('code', ['selection-statement']),
Production('code', ['iteration-statement']),
Production('code', ['return-statement']),
# Production('normal-statement', ['eval-statement', 'semicolon']),
# Production('normal-statement', ['semicolon']),
# 17
Production('normal-statement', ['semicolon']),
Production('normal-statement', ['id', 'normal-statement-follow']),
# 18
Production('normal-statement-follow', ['var-follow', 'evaluate', 'expression', 'semicolon']),
Production('normal-statement-follow', ['call-follow', 'semicolon']),
# 19
Production('call-follow', ['left-parentheses', 'call-params', 'right-parentheses']),
# 20
Production('call-params', ['call-param-list']),
Production('call-params', []),
# 21
Production('call-param-list', ['expression', 'call-param-follow']),
# 22
Production('call-param-follow', ['comma', 'expression', 'call-param-follow']),
Production('call-param-follow', []),
# 23
Production('selection-statement',
['if', 'left-parentheses', 'expression', 'right-parentheses', 'left-brace',
'code-list', 'right-brace', 'selection-follow']),
# 24
Production('selection-follow', ['else', 'left-brace', 'code-list', 'right-brace']),
Production('selection-follow', []),
# 25
Production('iteration-statement', ['while', 'left-parentheses', 'expression',
'right-parentheses', 'iteration-follow']),
# 26
Production('iteration-follow', ['left-brace', 'code-list', 'right-brace']),
Production('iteration-follow', ['code']),
# 27
Production('return-statement', ['return', 'return-follow']),
# 28
Production('return-follow', ['semicolon']),
Production('return-follow', ['expression', 'semicolon']),
# Production('eval-statement', ['var', 'evaluate', 'expression']),
# Production('var', ['id', 'var-follow']),
# 29
Production('var-follow', ['left-bracket', 'expression', 'right-bracket']),
Production('var-follow', []),
# 30
Production('expression', ['additive-expr', 'expression-follow']),
# 31
Production('expression-follow', ['rel-op', 'additive-expr']),
Production('expression-follow', []),
# 32
Production('rel-op', ['smaller-equal']),
Production('rel-op', ['smaller']),
Production('rel-op', ['bigger']),
Production('rel-op', ['bigger-equal']),
Production('rel-op', ['equal']),
Production('rel-op', ['not-equal']),
# 33
Production('additive-expr', ['term', 'additive-expr-follow']),
# 34
Production('additive-expr-follow', ['add-op', 'term', 'additive-expr-follow']),
Production('additive-expr-follow', []),
# 35
Production('add-op', ['addition']),
Production('add-op', ['subtraction']),
# 36
Production('term', ['factor', 'term-follow']),
# 37
Production('term-follow', ['mul-op', 'factor', 'term-follow']),
Production('term-follow', []),
# 38
Production('mul-op', ['multiplication']),
Production('mul-op', ['division']),
# 39
Production('factor', ['left-parentheses', 'expression', 'right-parentheses']),
Production('factor', ['id', 'id-factor-follow']),
Production('factor', ['num']),
# 40
Production('id-factor-follow', ['var-follow']),
Production('id-factor-follow', ['left-parentheses', 'args', 'right-parentheses']),
# 41
Production('args', ['arg-list']),
Production('args', []),
# 42
Production('arg-list', ['expression', 'arg-list-follow']),
Production('arg-list-follow', ['comma', 'expression', 'arg-list-follow']),
Production('arg-list-follow', [])
]
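# Example (added for illustration, not part of the original file): collect the
# productions whose left-hand side is a given non-terminal, e.g. 'type'.
#
#     [p.str for p in productions if p.left.type == 'type']
#     # -> ['type -> int', 'type -> void']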
# Grammar start symbol
grammar_start = Sign('program')
| 29.834975 | 103 | 0.536366 | 2,883 | 0.232088 | 0 | 0 | 0 | 0 | 0 | 0 | 8,035 | 0.646836 |
4015db6712f5e331d7a0bca4b41018047675a6cf | 24,566 | py | Python | redash/models.py | slachiewicz/redash | 84d95272f31885be00fbeef0cdbf6ddae6037f5d | ["BSD-2-Clause-FreeBSD"] | 1 | 2019-06-27T07:40:51.000Z | 2019-06-27T07:40:51.000Z | redash/models.py | slachiewicz/redash | 84d95272f31885be00fbeef0cdbf6ddae6037f5d | ["BSD-2-Clause-FreeBSD"] | 1 | 2021-03-20T05:38:23.000Z | 2021-03-20T05:38:23.000Z | redash/models.py | slachiewicz/redash | 84d95272f31885be00fbeef0cdbf6ddae6037f5d | ["BSD-2-Clause-FreeBSD"] | null | null | null |
import json
import hashlib
import logging
import os
import threading
import time
import datetime
import itertools
import peewee
from passlib.apps import custom_app_context as pwd_context
from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase
from flask.ext.login import UserMixin, AnonymousUserMixin
import psycopg2
from redash import utils, settings, redis_connection
from redash.query_runner import get_query_runner
class Database(object):
def __init__(self):
self.database_config = dict(settings.DATABASE_CONFIG)
self.database_config['register_hstore'] = False
self.database_name = self.database_config.pop('name')
self.database = PostgresqlExtDatabase(self.database_name, **self.database_config)
self.app = None
self.pid = os.getpid()
def init_app(self, app):
self.app = app
self.register_handlers()
def connect_db(self):
self._check_pid()
self.database.connect()
def close_db(self, exc):
self._check_pid()
if not self.database.is_closed():
self.database.close()
def _check_pid(self):
current_pid = os.getpid()
if self.pid != current_pid:
logging.info("New pid detected (%d!=%d); resetting database lock.", self.pid, current_pid)
self.pid = os.getpid()
self.database._conn_lock = threading.Lock()
def register_handlers(self):
self.app.before_request(self.connect_db)
self.app.teardown_request(self.close_db)
db = Database()
class BaseModel(peewee.Model):
class Meta:
database = db.database
@classmethod
def get_by_id(cls, model_id):
return cls.get(cls.id == model_id)
def pre_save(self, created):
pass
def post_save(self, created):
# Handler for post_save operations. Overriding if needed.
pass
def save(self, *args, **kwargs):
pk_value = self._get_pk_value()
created = kwargs.get('force_insert', False) or not bool(pk_value)
self.pre_save(created)
super(BaseModel, self).save(*args, **kwargs)
self.post_save(created)
class ModelTimestampsMixin(BaseModel):
updated_at = DateTimeTZField(default=datetime.datetime.now)
created_at = DateTimeTZField(default=datetime.datetime.now)
def pre_save(self, created):
super(ModelTimestampsMixin, self).pre_save(created)
self.updated_at = datetime.datetime.now()
class PermissionsCheckMixin(object):
def has_permission(self, permission):
return self.has_permissions((permission,))
def has_permissions(self, permissions):
has_permissions = reduce(lambda a, b: a and b,
map(lambda permission: permission in self.permissions,
permissions),
True)
return has_permissions
class AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):
@property
def permissions(self):
return []
class ApiUser(UserMixin, PermissionsCheckMixin):
def __init__(self, api_key):
self.id = api_key
def __repr__(self):
return u"<ApiUser: {}>".format(self.id)
@property
def permissions(self):
return ['view_query']
class Group(BaseModel):
DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',
'view_query', 'view_source', 'execute_query']
id = peewee.PrimaryKeyField()
name = peewee.CharField(max_length=100)
permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)
tables = ArrayField(peewee.CharField)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'groups'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'permissions': self.permissions,
'tables': self.tables,
'created_at': self.created_at
}
def __unicode__(self):
return unicode(self.id)
class User(ModelTimestampsMixin, BaseModel, UserMixin, PermissionsCheckMixin):
DEFAULT_GROUPS = ['default']
id = peewee.PrimaryKeyField()
name = peewee.CharField(max_length=320)
email = peewee.CharField(max_length=320, index=True, unique=True)
password_hash = peewee.CharField(max_length=128, null=True)
groups = ArrayField(peewee.CharField, default=DEFAULT_GROUPS)
class Meta:
db_table = 'users'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'email': self.email,
'updated_at': self.updated_at,
'created_at': self.created_at
}
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self._allowed_tables = None
@property
def permissions(self):
# TODO: this should be cached.
return list(itertools.chain(*[g.permissions for g in
Group.select().where(Group.name << self.groups)]))
@property
def allowed_tables(self):
        # TODO: cache this as well
if self._allowed_tables is None:
self._allowed_tables = set([t.lower() for t in itertools.chain(*[g.tables for g in
Group.select().where(Group.name << self.groups)])])
return self._allowed_tables
@classmethod
def get_by_email(cls, email):
return cls.get(cls.email == email)
def __unicode__(self):
return '%r, %r' % (self.name, self.email)
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return self.password_hash and pwd_context.verify(password, self.password_hash)
class ActivityLog(BaseModel):
QUERY_EXECUTION = 1
id = peewee.PrimaryKeyField()
user = peewee.ForeignKeyField(User)
type = peewee.IntegerField()
activity = peewee.TextField()
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'activity_log'
def to_dict(self):
return {
'id': self.id,
'user': self.user.to_dict(),
'type': self.type,
'activity': self.activity,
'created_at': self.created_at
}
def __unicode__(self):
return unicode(self.id)
class DataSource(BaseModel):
id = peewee.PrimaryKeyField()
name = peewee.CharField(unique=True)
type = peewee.CharField()
options = peewee.TextField()
queue_name = peewee.CharField(default="queries")
scheduled_queue_name = peewee.CharField(default="queries")
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'data_sources'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'type': self.type,
'syntax': self.query_runner.syntax
}
def get_schema(self, refresh=False):
key = "data_source:schema:{}".format(self.id)
cache = None
if not refresh:
cache = redis_connection.get(key)
if cache is None:
query_runner = self.query_runner
schema = sorted(query_runner.get_schema(), key=lambda t: t['name'])
redis_connection.set(key, json.dumps(schema))
else:
schema = json.loads(cache)
return schema
@property
def query_runner(self):
return get_query_runner(self.type, self.options)
@classmethod
def all(cls):
return cls.select().order_by(cls.id.asc())
class QueryResult(BaseModel):
id = peewee.PrimaryKeyField()
data_source = peewee.ForeignKeyField(DataSource)
query_hash = peewee.CharField(max_length=32, index=True)
query = peewee.TextField()
data = peewee.TextField()
runtime = peewee.FloatField()
retrieved_at = DateTimeTZField()
class Meta:
db_table = 'query_results'
def to_dict(self):
return {
'id': self.id,
'query_hash': self.query_hash,
'query': self.query,
'data': json.loads(self.data),
'data_source_id': self._data.get('data_source', None),
'runtime': self.runtime,
'retrieved_at': self.retrieved_at
}
@classmethod
def unused(cls):
week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)
return unused_results
@classmethod
def get_latest(cls, data_source, query, max_age=0):
query_hash = utils.gen_query_hash(query)
if max_age == -1:
query = cls.select().where(cls.query_hash == query_hash,
cls.data_source == data_source).order_by(cls.retrieved_at.desc())
else:
query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source,
peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'",
max_age)).order_by(cls.retrieved_at.desc())
return query.first()
@classmethod
def store_result(cls, data_source_id, query_hash, query, data, run_time, retrieved_at):
query_result = cls.create(query_hash=query_hash,
query=query,
runtime=run_time,
data_source=data_source_id,
retrieved_at=retrieved_at,
data=data)
logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id)
updated_count = Query.update(latest_query_data=query_result).\
where(Query.query_hash==query_hash, Query.data_source==data_source_id).\
execute()
logging.info("Updated %s queries with result (%s).", updated_count, query_hash)
return query_result
def __unicode__(self):
return u"%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)
def should_schedule_next(previous_iteration, now, schedule):
if schedule.isdigit():
ttl = int(schedule)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
else:
hour, minute = schedule.split(':')
hour, minute = int(hour), int(minute)
# The following logic is needed for cases like the following:
# - The query scheduled to run at 23:59.
# - The scheduler wakes up at 00:01.
# - Using naive implementation of comparing timestamps, it will skip the execution.
normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)
if normalized_previous_iteration > previous_iteration:
previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)
next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)
return now > next_iteration
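# Example of the two accepted schedule formats (added for illustration, not
# part of the original file): a TTL in seconds given as a digit string, or a
# daily "HH:MM" time.
#
#     prev = datetime.datetime(2015, 1, 1, 12, 0)
#     now = datetime.datetime(2015, 1, 1, 14, 0)
#     should_schedule_next(prev, now, "3600")   # True  (more than 3600s passed)
#     should_schedule_next(prev, now, "23:30")  # False (23:30 not reached yet)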
class Query(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
data_source = peewee.ForeignKeyField(DataSource)
latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
query = peewee.TextField()
query_hash = peewee.CharField(max_length=32)
api_key = peewee.CharField(max_length=40)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
last_modified_by = peewee.ForeignKeyField(User, null=True, related_name="modified_queries")
is_archived = peewee.BooleanField(default=False, index=True)
schedule = peewee.CharField(max_length=10, null=True)
class Meta:
db_table = 'queries'
def to_dict(self, with_stats=False, with_visualizations=False, with_user=True):
d = {
'id': self.id,
'latest_query_data_id': self._data.get('latest_query_data', None),
'name': self.name,
'description': self.description,
'query': self.query,
'query_hash': self.query_hash,
'schedule': self.schedule,
'api_key': self.api_key,
'is_archived': self.is_archived,
'updated_at': self.updated_at,
'created_at': self.created_at,
'data_source_id': self._data.get('data_source', None)
}
if with_user:
d['user'] = self.user.to_dict()
d['last_modified_by'] = self.last_modified_by.to_dict()
else:
d['user_id'] = self._data['user']
if with_stats:
d['retrieved_at'] = self.retrieved_at
d['runtime'] = self.runtime
if with_visualizations:
d['visualizations'] = [vis.to_dict(with_query=False)
for vis in self.visualizations]
return d
def archive(self):
self.is_archived = True
self.schedule = None
for vis in self.visualizations:
for w in vis.widgets:
w.delete_instance()
self.save()
@classmethod
def all_queries(cls):
q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\
.join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\
.switch(Query).join(User)\
.where(Query.is_archived==False)\
.group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\
.order_by(cls.created_at.desc())
return q
@classmethod
def outdated_queries(cls):
queries = cls.select(cls, QueryResult.retrieved_at, DataSource)\
.join(QueryResult)\
.switch(Query).join(DataSource)\
.where(cls.schedule != None)
now = datetime.datetime.utcnow().replace(tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=0, name=None))
outdated_queries = {}
for query in queries:
if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):
key = "{}:{}".format(query.query_hash, query.data_source.id)
outdated_queries[key] = query
return outdated_queries.values()
@classmethod
def search(cls, term):
# This is very naive implementation of search, to be replaced with PostgreSQL full-text-search solution.
where = (cls.name**u"%{}%".format(term)) | (cls.description**u"%{}%".format(term))
if term.isdigit():
where |= cls.id == term
where &= cls.is_archived == False
return cls.select().where(where).order_by(cls.created_at.desc())
@classmethod
def recent(cls, user_id):
# TODO: instead of t2 here, we should define table_alias for Query table
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")).\
join(Event, on=(Query.id == peewee.SQL("t2.object_id::integer"))).\
where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\
where(Event.user == user_id).\
where(~(Event.object_id >> None)).\
where(Event.object_type == 'query'). \
where(cls.is_archived == False).\
group_by(Event.object_id, Query.id).\
order_by(peewee.SQL("count(0) desc"))
@classmethod
def update_instance(cls, query_id, **kwargs):
if 'query' in kwargs:
kwargs['query_hash'] = utils.gen_query_hash(kwargs['query'])
update = cls.update(**kwargs).where(cls.id == query_id)
return update.execute()
def pre_save(self, created):
super(Query, self).pre_save(created)
self.query_hash = utils.gen_query_hash(self.query)
self._set_api_key()
if self.last_modified_by is None:
self.last_modified_by = self.user
def post_save(self, created):
if created:
self._create_default_visualizations()
def _create_default_visualizations(self):
table_visualization = Visualization(query=self, name="Table",
description='',
type="TABLE", options="{}")
table_visualization.save()
def _set_api_key(self):
if not self.api_key:
self.api_key = hashlib.sha1(
u''.join((str(time.time()), self.query, str(self._data['user']), self.name)).encode('utf-8')).hexdigest()
@property
def runtime(self):
return self.latest_query_data.runtime
@property
def retrieved_at(self):
return self.latest_query_data.retrieved_at
def __unicode__(self):
return unicode(self.id)
class Dashboard(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
slug = peewee.CharField(max_length=140, index=True)
name = peewee.CharField(max_length=100)
user_email = peewee.CharField(max_length=360, null=True)
user = peewee.ForeignKeyField(User)
layout = peewee.TextField()
dashboard_filters_enabled = peewee.BooleanField(default=False)
is_archived = peewee.BooleanField(default=False, index=True)
class Meta:
db_table = 'dashboards'
def to_dict(self, with_widgets=False):
layout = json.loads(self.layout)
if with_widgets:
widgets = Widget.select(Widget, Visualization, Query, User)\
.where(Widget.dashboard == self.id)\
.join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\
.join(Query, join_type=peewee.JOIN_LEFT_OUTER)\
.join(User, join_type=peewee.JOIN_LEFT_OUTER)
widgets = {w.id: w.to_dict() for w in widgets}
            # The following is a workaround for cases when the widget object gets deleted without the dashboard layout
            # being updated. This happens for users with old databases that didn't have a foreign key relationship
            # between visualizations and widgets.
            # It's temporary until a better solution is implemented (we should probably move the position information
            # to the widget).
widgets_layout = []
for row in layout:
new_row = []
for widget_id in row:
widget = widgets.get(widget_id, None)
if widget:
new_row.append(widget)
widgets_layout.append(new_row)
# widgets_layout = map(lambda row: map(lambda widget_id: widgets.get(widget_id, None), row), layout)
else:
widgets_layout = None
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'user_id': self._data['user'],
'layout': layout,
'dashboard_filters_enabled': self.dashboard_filters_enabled,
'widgets': widgets_layout,
'updated_at': self.updated_at,
'created_at': self.created_at
}
@classmethod
def get_by_slug(cls, slug):
return cls.get(cls.slug == slug)
@classmethod
def recent(cls, user_id):
return cls.select().where(Event.created_at > peewee.SQL("current_date - 7")). \
join(Event, on=(Dashboard.id == peewee.SQL("t2.object_id::integer"))). \
where(Event.action << ('edit', 'view')).\
where(Event.user == user_id). \
where(~(Event.object_id >> None)). \
where(Event.object_type == 'dashboard'). \
group_by(Event.object_id, Dashboard.id). \
order_by(peewee.SQL("count(0) desc"))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = utils.slugify(self.name)
tries = 1
while self.select().where(Dashboard.slug == self.slug).first() is not None:
self.slug = utils.slugify(self.name) + "_{0}".format(tries)
tries += 1
super(Dashboard, self).save(*args, **kwargs)
def __unicode__(self):
return u"%s=%s" % (self.id, self.name)
class Visualization(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
type = peewee.CharField(max_length=100)
query = peewee.ForeignKeyField(Query, related_name='visualizations')
name = peewee.CharField(max_length=255)
description = peewee.CharField(max_length=4096, null=True)
options = peewee.TextField()
class Meta:
db_table = 'visualizations'
def to_dict(self, with_query=True):
d = {
'id': self.id,
'type': self.type,
'name': self.name,
'description': self.description,
'options': json.loads(self.options),
'updated_at': self.updated_at,
'created_at': self.created_at
}
if with_query:
d['query'] = self.query.to_dict()
return d
def __unicode__(self):
return u"%s %s" % (self.id, self.type)
class Widget(ModelTimestampsMixin, BaseModel):
id = peewee.PrimaryKeyField()
visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)
text = peewee.TextField(null=True)
width = peewee.IntegerField()
options = peewee.TextField()
dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)
    # unused; kept for backward compatibility:
type = peewee.CharField(max_length=100, null=True)
query_id = peewee.IntegerField(null=True)
class Meta:
db_table = 'widgets'
def to_dict(self):
d = {
'id': self.id,
'width': self.width,
'options': json.loads(self.options),
'dashboard_id': self._data['dashboard'],
'text': self.text,
'updated_at': self.updated_at,
'created_at': self.created_at
}
if self.visualization and self.visualization.id:
d['visualization'] = self.visualization.to_dict()
return d
def __unicode__(self):
return u"%s" % self.id
def delete_instance(self, *args, **kwargs):
layout = json.loads(self.dashboard.layout)
layout = map(lambda row: filter(lambda w: w != self.id, row), layout)
layout = filter(lambda row: len(row) > 0, layout)
self.dashboard.layout = json.dumps(layout)
self.dashboard.save()
super(Widget, self).delete_instance(*args, **kwargs)
class Event(BaseModel):
user = peewee.ForeignKeyField(User, related_name="events", null=True)
action = peewee.CharField()
object_type = peewee.CharField()
object_id = peewee.CharField(null=True)
additional_properties = peewee.TextField(null=True)
created_at = DateTimeTZField(default=datetime.datetime.now)
class Meta:
db_table = 'events'
def __unicode__(self):
return u"%s,%s,%s,%s" % (self._data['user'], self.action, self.object_type, self.object_id)
@classmethod
def record(cls, event):
user = event.pop('user_id')
action = event.pop('action')
object_type = event.pop('object_type')
object_id = event.pop('object_id', None)
created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))
additional_properties = json.dumps(event)
event = cls.create(user=user, action=action, object_type=object_type, object_id=object_id,
additional_properties=additional_properties, created_at=created_at)
return event
all_models = (DataSource, User, QueryResult, Query, Dashboard, Visualization, Widget, ActivityLog, Group, Event)
def init_db():
Group.insert(name='admin', permissions=['admin'], tables=['*']).execute()
Group.insert(name='default', permissions=Group.DEFAULT_PERMISSIONS, tables=['*']).execute()
def create_db(create_tables, drop_tables):
db.connect_db()
for model in all_models:
if drop_tables and model.table_exists():
# TODO: submit PR to peewee to allow passing cascade option to drop_table.
db.database.execute_sql('DROP TABLE %s CASCADE' % model._meta.db_table)
if create_tables and not model.table_exists():
model.create_table()
db.close_db(None)
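# Usage sketch (added for illustration, not part of the original file):
# create the tables and seed the built-in groups.
#
#     create_db(create_tables=True, drop_tables=False)
#     init_db()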
| 33.790922 | 121 | 0.620492 | 22,373 | 0.91073 | 0 | 0 | 6,619 | 0.269437 | 0 | 0 | 2,856 | 0.116258 |
40163fa4a642e9716f853bee7c3624573ecfac17 | 10,112 | py | Python | xclib/classifier/ova.py | sushantsondhi/pyxclib | ecdfab6b72f9a02892eee617f45bef73c928ca81 | ["MIT"] | 4 | 2019-07-11T14:43:22.000Z | 2019-08-08T19:12:53.000Z | xclib/classifier/ova.py | kunaldahiya/xclib | b40e4dd49533ac78231a12f8af362e7f8c6f5df2 | ["MIT"] | null | null | null | xclib/classifier/ova.py | kunaldahiya/xclib | b40e4dd49533ac78231a12f8af362e7f8c6f5df2 | ["MIT"] | null | null | null |
import numpy as np
import time
import logging
from .base import BaseClassifier
import scipy.sparse as sp
from ._svm import train_one
from functools import partial
from ..utils import sparse
from ..data import data_loader
from ._svm import train_one, _get_liblinear_solver_type
from joblib import Parallel, delayed
from ..utils.matrix import SMatrix
from tqdm import tqdm
def separate(result):
return [item[0] for item in result], [item[1] for item in result]
def convert_to_sparse(weight, bias):
weight = np.vstack(weight).squeeze()
bias = np.vstack(bias).squeeze()
return sp.csr_matrix(weight), sp.csr_matrix(bias).transpose()
class OVAClassifier(BaseClassifier):
"""
One-vs-all classifier for sparse or dense data
(suitable for large label set)
Parameters:
-----------
solver: str, optional, default='liblinear'
solver to use
loss: str, optional, default='squared_hinge'
loss to optimize,
- hinge
- squared_hinge
C: float, optional, default=1.0
cost in svm
verbose: int, optional, default=0
print progress in svm
max_iter: int, optional, default=20
iteration in solver
tol: float, optional, default=0.1
tolerance in solver
threshold: float, optional, default=0.01
threshold for hard thresholding (after training classifier)
- bias values are not touched
- 0.01: for sparse features
- 1e-5: for dense features
feature_type: str, optional, default='sparse'
feature type: sparse or dense
dual: boolean, optional, default=true
solve in primal or dual
use_bias: boolean, optional, default=True
train bias parameter or not
num_threads: int, optional, default=10
use multiple threads to parallelize
batch_size: int, optional, default=1000
train these many classifiers in parallel
norm: str, optional, default='l2'
normalize features
penalty: str, optional, default='l2'
l1 or l2 regularizer
"""
def __init__(self, solver='liblinear', loss='squared_hinge', C=1.0,
verbose=0, max_iter=20, tol=0.1, threshold=0.01,
feature_type='sparse', dual=True, use_bias=True,
num_threads=12, batch_size=1000, norm='l2', penalty='l2'):
super().__init__(verbose, use_bias, feature_type)
self.loss = loss
self.C = C
self.penalty = penalty
self.norm = norm
self.num_threads = num_threads
self.verbose = verbose
self.max_iter = max_iter
self.threshold = threshold
self.tol = tol
self.dual = dual
self.batch_size = batch_size
self.num_labels = None
self.valid_labels = None
self.num_labels_ = None
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger('OVAClassifier')
self.logger.info("Parameters:: {}".format(str(self)))
def _merge_weights(self, weights, biases):
# Bias is always a dense array
if self.feature_type == 'sparse':
self.weight = sp.vstack(
weights, format='csr', dtype=np.float32)
self.bias = sp.vstack(
biases, format='csr', dtype=np.float32).toarray()
else:
self.weight = np.vstack(weights).astype(np.float32).squeeze()
self.bias = np.vstack(biases).astype(np.float32)
def get_data_loader(self, data_dir, dataset, feat_fname,
label_fname, mode, batch_order):
"""Data loader
- batch_order: 'label' during training
- batch_order: 'instances' during prediction
"""
return data_loader.Dataloader(
batch_size=self.batch_size,
data_dir=data_dir,
dataset=dataset,
feat_fname=feat_fname,
label_fname=label_fname,
feature_type=self.feature_type,
mode=mode,
batch_order=batch_order,
norm=self.norm,
start_index=0,
end_index=-1)
def fit(self, data_dir, dataset, feat_fname, label_fname,
model_dir, save_after=1):
"""Train the classifier
Will create batches on labels and then parallelize
- Not very efficient when training time per classifier is too low
- Will not train for labels without any datapoints
        A list will be maintained which will be used to remap labels
to original ids
Arguments:
---------
data_dir: str
data directory with all files
dataset: str
Name of the dataset; like EURLex-4K
feat_fname: str
File name of training feature file
Should be in sparse format with header
label_fname: str
File name of training label file
Should be in sparse format with header
model_dir: str
dump checkpoints in this directory
based on save_after
save_after: int, default=1
save checkpoints after these many steps
"""
self.logger.info("Training!")
data = self.get_data_loader(
data_dir, dataset, feat_fname, label_fname, 'train', 'labels')
self.num_labels = data.num_labels # valid labels
self.num_labels_ = data.num_labels_ # number of original labels
self.valid_labels = data.valid_labels
weights, biases = [], []
run_time = 0.0
start_time = time.time()
idx = 0
for batch_data in tqdm(data):
start_time = time.time()
batch_weight, batch_bias = self._train(
batch_data, self.num_threads)
del batch_data
if self.feature_type == 'sparse':
batch_weight, batch_bias = convert_to_sparse(
batch_weight, batch_bias)
batch_time = time.time() - start_time
run_time += batch_time
weights.append(batch_weight), biases.extend(batch_bias)
if idx != 0 and idx % save_after == 0:
# TODO: Delete these to save memory?
self._merge_weights(weights, biases)
self._save_state(model_dir, idx)
self.logger.info("Saved state at epoch: {}".format(idx))
idx += 1
self._merge_weights(weights, biases)
self.logger.info("Training time (sec): {}, model size (MB): {}".format(
run_time, self.model_size))
def _train(self, data, num_threads):
"""Train SVM for multiple labels
Arguments:
---------
data: list
[{'X': X, 'Y': y}]
Returns
-------
weights: np.ndarray
weight of the classifier
bias: float
bias of the classifier
"""
_func = self._get_partial_train()
with Parallel(n_jobs=num_threads) as parallel:
result = parallel(delayed(_func)(d) for d in data)
weights, biases = separate(result)
del result
return weights, biases
def predict(self, data_dir, dataset, feat_fname, label_fname, top_k=10):
"""Predict using the classifier
        Will create batches on instances and then parallelize
Arguments:
---------
data_dir: str
data directory with all files
dataset: str
Name of the dataset; like EURLex-4K
feat_fname: str
File name of training feature file
Should be in sparse format with header
label_fname: str
File name of training label file
Should be in sparse format with header
TODO: Avoid sending labels as they are not used
"""
self._transpose_weights()
self.logger.info("Predicting!")
use_sparse = self.feature_type == 'sparse'
data = self.get_data_loader(
data_dir, dataset, feat_fname, label_fname, 'predict', 'instances')
num_instances = data.num_instances
predicted_labels = SMatrix(
n_rows=num_instances,
n_cols=self.num_labels,
nnz=top_k)
start_time = time.time()
start_idx = 0
for batch_data in tqdm(data):
pred = batch_data['data'][batch_data['ind']
] @ self.weight + self.bias
predicted_labels.update_block(
start_idx,
ind=None,
val=pred.view(np.ndarray) if use_sparse else pred)
start_idx += pred.shape[0]
end_time = time.time()
self.logger.info(
"Prediction time/sample (ms): {}".format(
(end_time-start_time)*1000/num_instances))
return self._map_to_original(predicted_labels.data())
def _get_partial_train(self):
return partial(train_one, solver_type=self.solver, C=self.C,
verbose=self.verbose, max_iter=self.max_iter,
threshold=self.threshold, tol=self.tol,
intercept_scaling=1.0, fit_intercept=self.use_bias,
epsilon=0)
def _map_to_original(self, X):
"""Some labels were removed during training as training data was
not availale; remap to original mapping
- Assumes documents need not be remapped
"""
shape = (X.shape[0], self.num_labels_)
return sparse._map_cols(X, self.valid_labels, shape)
def _transpose_weights(self):
self.weight = self.weight.transpose()
self.bias = self.bias.transpose()
def __repr__(self):
s = "C: {C}, max_iter: {max_iter}, threshold: {threshold}" \
", loss: {loss}, dual: {dual}, bias: {use_bias}, norm: {norm}" \
", num_threads: {num_threads}, batch_size: {batch_size}"\
", tol: {tol}, penalty: {penalty}"
return s.format(**self.__dict__)
@property
def solver(self):
return _get_liblinear_solver_type(
'ovr', self.penalty, self.loss, self.dual)
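# Usage sketch (added for illustration; the directory layout and file names
# below are assumptions, not part of this module):
#
#     clf = OVAClassifier(C=1.0, max_iter=20, feature_type='sparse')
#     clf.fit(data_dir='data', dataset='EURLex-4K',
#             feat_fname='trn_X_Xf.txt', label_fname='trn_X_Y.txt',
#             model_dir='model', save_after=1)
#     pred = clf.predict(data_dir='data', dataset='EURLex-4K',
#                        feat_fname='tst_X_Xf.txt', label_fname='tst_X_Y.txt',
#                        top_k=10)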
| 37.313653 | 79 | 0.598497 | 9,461 | 0.935621 | 0 | 0 | 129 | 0.012757 | 0 | 0 | 4,037 | 0.399229 |
40179a2e52133e978bed3c8e59ac4742ba5dae20 | 6,555 | py | Python | ipgroup_test.py | RyPeck/python-ipgroup | 8fb1037d886a52127e7231f051403396dcb4dc60 | ["Apache-2.0"] | 1 | 2015-01-10T18:34:51.000Z | 2015-01-10T18:34:51.000Z | ipgroup_test.py | RyPeck/python-ipgroup | 8fb1037d886a52127e7231f051403396dcb4dc60 | ["Apache-2.0"] | null | null | null | ipgroup_test.py | RyPeck/python-ipgroup | 8fb1037d886a52127e7231f051403396dcb4dc60 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
import ipaddress
import random
import unittest
import ipgroup
class TestGroupIPs(unittest.TestCase):
def setUp(self):
pass
def test_group(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/16": 2,
"127.1.0.0/16": 2,
"127.2.0.0/16": 2,
}
a = ipgroup.IPv4Group(IPs, 16)
self.assertEqual(expected_results, a.group)
def test_group2(self):
IPs = ["127.0.0.1",
"127.0.1.1",
"127.1.1.1",
"127.1.0.1",
"127.2.0.1",
"127.2.1.1",
]
expected_results = {"127.0.0.0/24": 1,
"127.0.1.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
"127.2.0.0/24": 1,
"127.2.1.0/24": 1,
}
b = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results, b.group)
def test_group3(self):
""" 'Random' test """
# Small Netblock so we don't do over 2**10 hosts to test with
random_cidr = random.randint(22, 30)
network = ipaddress.IPv4Network(("129.21.0.0/%s" % random_cidr))
        # So our sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(32 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("129.21.0.0/%s" % random_cidr): random_int}
c = ipgroup.IPv4Group(IPs, random_cidr)
self.assertEqual(expected_results, c.group)
def test_IPv6(self):
""" 'Random' test """
# Small Netblock so we don't do over 2**10 hosts to test with
random_cidr = random.randint(118, 126)
network = ipaddress.IPv6Network(("2607:f8b0:4009:803::/%s" %
random_cidr))
        # So our sample size is never bigger than the population of hosts
random_int = random.randint(1, 2**(128 - random_cidr - 1))
IPs = random.sample(set(network.hosts()), random_int)
expected_results = {("2607:f8b0:4009:803::/%s" % random_cidr):
random_int}
d = ipgroup.IPv6Group(IPs, random_cidr)
self.assertEqual(expected_results, d.group)
def test_reGroup(self):
IPs = ["127.0.0.1",
"127.1.0.1",
"127.1.1.1",
]
expected_results1 = {"127.0.0.0/24": 1,
"127.1.0.0/24": 1,
"127.1.1.0/24": 1,
}
g = ipgroup.IPv4Group(IPs, 24)
self.assertEqual(expected_results1, g.group)
expected_results2 = {"127.0.0.0/16": 1,
"127.1.0.0/16": 2,
}
g.reGroup(16)
self.assertEqual(expected_results2, g.group)
class TestTotalAddresses(unittest.TestCase):
"""
    Tests that the totalAddresses function returns the correct number of
    unique addresses in various scenarios.
"""
def setUp(self):
pass
def test_total_address1(self):
self.assertEqual(8, ipgroup.totalAddresses("127.0.0.0/29"))
def test_total_address2(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/16",
])
self.assertEqual(2**17, total)
def test_total_address3(self):
total = ipgroup.totalAddresses(["192.168.1.1/16",
"127.0.0.0/28"
])
self.assertEqual((2**16 + 2**4), total)
def test_total_address4(self):
total = ipgroup.totalAddresses(["128.151.2.0/24",
"128.151.2.0/30",
])
self.assertEqual(2**8, total)
def test_total_address5(self):
total = ipgroup.totalAddresses(["128.151.2.0/24",
"128.151.2.0/23",
])
self.assertEqual(2**9, total)
def test_total_address_overlapping(self):
""" For the scenario where networks will contain eachother. """
total = ipgroup.totalAddresses(["129.21.0.0/16",
"129.21.1.0/18",
"129.21.1.0/24",
])
self.assertEqual(2**16, total)
def test_total_address_overlapping2(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual((2**24 + 2**24), total)
def test_total_address_overlapping3(self):
""" For the scenario where networks will contain eachother big networks
to show that the function is fast, no longer enumerating all networks.
"""
total = ipgroup.totalAddresses(["1.0.0.0/8",
"1.0.0.0/4",
"2.0.0.0/8",
"2.0.0.0/16",
"2.1.1.0/24",
"1.0.0.0/16",
"1.1.1.0/24",
"2.0.0.0/8",
])
self.assertEqual(2**28, total)
def test_total_address_overlap_IPv6(self):
total = ipgroup.totalAddresses(['2620:008d:8000::/48',
'2620:008d:8000:e693::/64',
])
self.assertEqual(2**80, total)
if __name__ == "__main__":
unittest.main()
| 31.666667 | 79 | 0.438139 | 6,413 | 0.978337 | 0 | 0 | 0 | 0 | 0 | 0 | 1,670 | 0.254767 |
4017c147f527555c7fa69c7bf75c0f142e6a0a28 | 2,566 | py | Python | progress.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
]
| 2 | 2022-03-07T06:41:35.000Z | 2022-03-11T04:26:40.000Z | progress.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
]
| 1 | 2022-02-22T02:08:06.000Z | 2022-02-22T02:08:06.000Z | progress.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
]
| 1 | 2022-02-21T19:47:10.000Z | 2022-02-21T19:47:10.000Z | from dataclasses import dataclass
from dol import Dol
from asm_section_list import AsmSection, AsmSectionType
@dataclass
class Slice:
start: int
end: int
def size(self) -> int:
assert self.end > self.start
return self.end - self.start
def contains_section(self, sect: AsmSection) -> bool:
return self.start <= sect.start and self.end > sect.start + sect.size
@dataclass
class SliceGroup:
name: str
slices: list[Slice]
def total_size(self) -> int:
size = 0
for _slice in self.slices:
size += _slice.size()
return size
def contains_section(self, sect: AsmSection) -> bool:
for _slice in self.slices:
if _slice.contains_section(sect):
return True
return False
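# A minimal usage sketch (hypothetical addresses, not taken from a real project):
#
#     text = SliceGroup("main.cpp", [Slice(0x80004000, 0x80004800),
#                                    Slice(0x80005000, 0x80005100)])
#     text.total_size()               # 0x900 bytes across both slices
#     text.contains_section(section)  # True if `section` starts and ends inside one slice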
def calc_generic_progress(dol: Dol, asm_list: list[AsmSection]):
# Sum up code/data in ASM
asm_code_size = 0
asm_data_size = 0
for section in asm_list:
if section.type == AsmSectionType.CODE:
asm_code_size += section.size
elif section.type == AsmSectionType.DATA:
asm_data_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_code_size = dol.code_size()
dol_data_size = dol.data_size()
# Decompiled sizes
decomp_code_size = dol_code_size - asm_code_size
decomp_data_size = dol_data_size - asm_data_size
# Percentages
code_percent = decomp_code_size / dol_code_size
data_percent = decomp_data_size / dol_data_size
print(f"Code sections: {decomp_code_size} / {dol_code_size} bytes in src ({code_percent:%})")
print(f"Data sections: {decomp_data_size} / {dol_data_size} bytes in src ({data_percent:%})")
def calc_slice_progress(slices: SliceGroup, asm_list: list[AsmSection]):
asm_slice_size = 0
for section in asm_list:
if slices.contains_section(section):
if section.type == AsmSectionType.CODE:
asm_slice_size += section.size
elif section.type == AsmSectionType.DATA:
asm_slice_size += section.size
else:
assert False, f"Invalid section type ({section.type})!"
# Dol sizes
dol_slice_size = slices.total_size()
# Decompiled sizes
decomp_slice_size = dol_slice_size - asm_slice_size
# Percentages
slice_percent = decomp_slice_size / dol_slice_size
print(f"\t{slices.name}: {decomp_slice_size} / {dol_slice_size} bytes in src ({slice_percent:%})") | 33.763158 | 102 | 0.66212 | 663 | 0.258379 | 0 | 0 | 685 | 0.266952 | 0 | 0 | 454 | 0.176929 |
4018589aba6937e4ecc7ee0d948bf2a417774d03 | 13,993 | py | Python | main_qm9.py | maxxxzdn/en_flows | 04ed4dd45431cafcd23f8bf5199a47f917a72058 | [
"MIT"
]
| null | null | null | main_qm9.py | maxxxzdn/en_flows | 04ed4dd45431cafcd23f8bf5199a47f917a72058 | [
"MIT"
]
| null | null | null | main_qm9.py | maxxxzdn/en_flows | 04ed4dd45431cafcd23f8bf5199a47f917a72058 | [
"MIT"
]
| null | null | null | import utils
import argparse
import wandb
from os.path import join
from qm9 import dataset
from qm9 import losses
from qm9.models import get_optim, get_model
from flows.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked
import torch
import time
import pickle
import numpy as np
import qm9.visualizer as vis
from qm9.analyze import analyze_stability_for_molecules
from qm9.utils import prepare_context
from qm9.sampling import sample_chain, sample
from qm9 import mol_dim
parser = argparse.ArgumentParser(description='SE3')
parser.add_argument('--exp_name', type=str, default='debug_10')
parser.add_argument('--model', type=str, default='egnn_dynamics',
help='our_dynamics | schnet | simple_dynamics | '
'kernel_dynamics | egnn_dynamics |gnn_dynamics')
parser.add_argument('--n_epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--brute_force', type=eval, default=False,
help='True | False')
parser.add_argument('--actnorm', type=eval, default=True,
help='True | False')
parser.add_argument('--break_train_epoch', type=eval, default=False,
help='True | False')
parser.add_argument('--dp', type=eval, default=True,
help='True | False')
parser.add_argument('--condition_time', type=eval, default=True,
help='True | False')
parser.add_argument('--clip_grad', type=eval, default=True,
help='True | False')
parser.add_argument('--trace', type=str, default='hutch',
help='hutch | exact')
parser.add_argument('--n_layers', type=int, default=6,
help='number of layers')
parser.add_argument('--nf', type=int, default=64,
                    help='hidden feature dimension')
parser.add_argument('--ode_regularization', type=float, default=1e-3)
parser.add_argument('--dataset', type=str, default='qm9',
help='qm9 | qm9_positional')
parser.add_argument('--dequantization', type=str, default='argmax_variational',
help='uniform | variational | argmax_variational')
parser.add_argument('--tanh', type=eval, default=True,
help='use tanh in the coord_mlp')
parser.add_argument('--attention', type=eval, default=True,
help='use attention in the EGNN')
parser.add_argument('--n_report_steps', type=int, default=1)
parser.add_argument('--wandb_usr', type=str, default='')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--save_model', type=eval, default=True,
help='save model')
parser.add_argument('--generate_epochs', type=int, default=1,
help='save model')
parser.add_argument('--num_workers', type=int, default=0, help='Number of worker for the dataloader')
parser.add_argument('--test_epochs', type=int, default=1)
parser.add_argument('--physics', type=int, default=0, help='Minimize energy loss or not')
parser.add_argument('--data_augmentation', type=eval, default=False,
                    help='augment training data with random rotations')
parser.add_argument('--x_aggregation', type=str, default='sum',
help='sum | mean')
parser.add_argument("--conditioning", nargs='+', default=[],
help='multiple arguments can be passed, '
'including: homo | onehot | lumo | num_atoms | etc. '
'usage: "--conditioning H_thermo homo onehot H_thermo"')
parser.add_argument('--resume', type=str, default=None,
help='')
parser.add_argument('--start_epoch', type=int, default=0,
help='')
args, unparsed_args = parser.parse_known_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
dtype = torch.float32
if args.resume is not None:
exp_name = args.exp_name + '_resume'
start_epoch = args.start_epoch
resume = args.resume
wandb_usr = args.wandb_usr
with open(join(args.resume, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
args.resume = resume
args.break_train_epoch = False
args.exp_name = exp_name
args.start_epoch = start_epoch
args.wandb_usr = wandb_usr
print(args)
utils.create_folders(args)
print(args)
# Log all args to wandb
wandb.init(entity='aipp', project='eegVAE', name=args.exp_name, config=args)
wandb.save('*.txt')
# Retrieve QM9 dataloaders
dataloaders, charge_scale = dataset.retrieve_dataloaders(args.batch_size, args.num_workers)
data_dummy = next(iter(dataloaders['train']))
if len(args.conditioning) > 0:
print(f'Conditioning on {args.conditioning}')
context_dummy = prepare_context(args.conditioning, data_dummy)
context_node_nf = context_dummy.size(2)
else:
context_node_nf = 0
args.context_node_nf = context_node_nf
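# Assumption for illustration: conditioning on two scalar properties such as
# ['homo', 'lumo'] gives a context tensor of shape (batch, n_nodes, 2), so
# context_node_nf becomes 2 and is forwarded to the model through args.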
# Create EGNN flow
prior, flow, dequantizer, nodes_dist = get_model(args, device)
flow = flow.to(device)
dequantizer = dequantizer.to(device)
optim = get_optim(args, flow, dequantizer)
print(flow)
gradnorm_queue = utils.Queue()
gradnorm_queue.add(3000) # Add large value that will be flushed.
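# utils.gradient_clipping (not shown here) is assumed to clip to a multiple of the
# running statistics kept in this queue, roughly:
#
#     max_norm = 1.5 * gradnorm_queue.mean() + 2 * gradnorm_queue.std()
#     grad_norm = torch.nn.utils.clip_grad_norm_(flow.parameters(), max_norm=max_norm)
#     gradnorm_queue.add(float(grad_norm))
#
# Seeding the queue with 3000 keeps the first thresholds loose until real norms replace it.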
def check_mask_correct(variables, node_mask):
for variable in variables:
assert_correctly_masked(variable, node_mask)
def train_epoch(loader, epoch, flow, flow_dp):
nll_epoch = []
for i, data in enumerate(loader):
# Get data
x = data['positions'].to(device, dtype)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = data['charges'].to(device, dtype).unsqueeze(2)
x = remove_mean_with_mask(x, node_mask)
if args.data_augmentation:
x = utils.random_rotation(x).detach()
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = prepare_context(args.conditioning, data).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
optim.zero_grad()
if args.physics:
energy_loss = mol_dim.compute_energy_loss(dequantizer, flow, prior,
nodes_dist, x.clone(), node_mask, edge_mask, context)
else:
            # Keep a tensor so the energy_loss.item() logging below also works
            # when the physics term is disabled.
            energy_loss = torch.tensor(0., device=device)
nll, reg_term, mean_abs_z = losses.compute_loss_and_nll(args, dequantizer, flow_dp, prior, nodes_dist, x, h,
node_mask, edge_mask, context)
        loss = 0.01*energy_loss + nll + args.ode_regularization * reg_term
        loss.backward()
        # Clip gradients only after backward() so there are gradients to clip.
        if args.clip_grad:
            grad_norm = utils.gradient_clipping(flow, gradnorm_queue)
        else:
            grad_norm = 0.
        optim.step()
        if i % args.n_report_steps == 0:
            print(f"\repoch: {epoch}, iter: {i}/{len(loader)}, "
                  f"Loss {loss.item():.2f}, NLL: {nll.item():.2f}, "
                  f"RegTerm: {reg_term.item():.1f}, "
                  f"PhysTerm: {energy_loss.item():.1f}, "
                  f"GradNorm: {grad_norm:.1f}")
nll_epoch.append(nll.item())
        if i % 100 == 0 and i != 0:
analyze_and_save(epoch)
save_and_sample_chain(epoch=epoch)
sample_different_sizes_and_save(epoch=epoch)
vis.visualize("outputs/%s/epoch_%d" % (args.exp_name, epoch), wandb=wandb)
vis.visualize_chain(
"outputs/%s/epoch_%d/chain/" % (args.exp_name, epoch),
wandb=wandb)
wandb.log({"mean(abs(z))": mean_abs_z}, commit=False)
wandb.log({"Batch NLL": nll.item()}, commit=True)
wandb.log({"Energy": energy_loss.item()}, commit=True)
if args.break_train_epoch:
break
wandb.log({"Train Epoch NLL": np.mean(nll_epoch)}, commit=False)
def test(loader, epoch, flow_dp, partition='Test'):
with torch.no_grad():
nll_epoch = 0
n_samples = 0
for i, data in enumerate(loader):
# Get data
x = data['positions'].to(device, dtype)
batch_size = x.size(0)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = data['charges'].to(device, dtype).unsqueeze(2)
x = remove_mean_with_mask(x, node_mask)
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = prepare_context(args.conditioning, data).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
# transform batch through flow
nll, _, _ = losses.compute_loss_and_nll(args, dequantizer, flow_dp, prior, nodes_dist, x, h, node_mask,
edge_mask, context)
# standard nll from forward KL
nll_epoch += nll.item() * batch_size
n_samples += batch_size
if i % args.n_report_steps == 0:
print(f"\r {partition} NLL \t epoch: {epoch}, iter: {i}/{len(loader)}, "
f"NLL: {nll_epoch/n_samples:.2f}")
if args.break_train_epoch:
break
return nll_epoch/n_samples
def save_and_sample_chain(epoch=0, id_from=0):
one_hot, charges, x = sample_chain(
args, device, flow, dequantizer, prior, n_tries=1)
vis.save_xyz_file(
'outputs/%s/epoch_%d/chain/' % (args.exp_name, epoch), one_hot, charges, x,
id_from, name='chain')
return one_hot, charges, x
def sample_different_sizes_and_save(n_samples=10, epoch=0):
for counter in range(n_samples):
n_nodes = nodes_dist.sample()
one_hot, charges, x = sample(args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
vis.save_xyz_file(
'outputs/%s/epoch_%d/' % (args.exp_name, epoch), one_hot,
charges, x,
1*counter, name='molecule')
def analyze_and_save(epoch, n_samples=1000):
print('Analyzing molecule validity...')
molecule_list = []
for i in range(n_samples):
n_nodes = nodes_dist.sample()
one_hot, charges, x = sample(
args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
molecule_list.append((one_hot.detach(), x.detach()))
validity_dict, _ = analyze_stability_for_molecules(molecule_list)
wandb.log(validity_dict)
return validity_dict
def sample_batch(prior, flow):
print('Creating...')
n_nodes = nodes_dist.sample()
_, _, x = sample(args, device, flow, dequantizer, prior, n_samples=1, n_nodes=n_nodes)
return x
def main():
if args.resume is not None:
flow_state_dict = torch.load(join(args.resume, 'flow.npy'))
dequantizer_state_dict = torch.load(join(args.resume, 'dequantizer.npy'))
optim_state_dict = torch.load(join(args.resume, 'optim.npy'))
flow.load_state_dict(flow_state_dict)
dequantizer.load_state_dict(dequantizer_state_dict)
optim.load_state_dict(optim_state_dict)
flow_dp = flow
if args.dp and torch.cuda.device_count() > 1:
print(f'Training using {torch.cuda.device_count()} GPUs')
flow_dp = torch.nn.DataParallel(flow_dp.cpu())
flow_dp = flow_dp.cuda()
best_nll_val = 1e8
best_nll_test = 1e8
for epoch in range(args.start_epoch, args.n_epochs):
start_epoch = time.time()
train_epoch(dataloaders['train'], epoch, flow, flow_dp)
print(f"Epoch took {time.time() - start_epoch:.1f} seconds.")
if epoch % args.test_epochs == 0:
analyze_and_save(epoch)
nll_val = test(dataloaders['valid'], epoch, flow_dp, partition='Val')
nll_test = test(dataloaders['test'], epoch, flow_dp, partition='Test')
if nll_val < best_nll_val:
best_nll_val = nll_val
best_nll_test = nll_test
if args.save_model:
args.current_epoch = epoch + 1
utils.save_model(optim, 'outputs/%s/optim.npy' % args.exp_name)
utils.save_model(flow, 'outputs/%s/flow.npy' % args.exp_name)
utils.save_model(dequantizer, 'outputs/%s/dequantizer.npy' % args.exp_name)
with open('outputs/%s/args.pickle' % args.exp_name, 'wb') as f:
pickle.dump(args, f)
if args.save_model and epoch > 28:
utils.save_model(optim, 'outputs/%s/optim_%d.npy' % (args.exp_name, epoch))
utils.save_model(flow, 'outputs/%s/flow_%d.npy' % (args.exp_name, epoch))
utils.save_model(dequantizer, 'outputs/%s/dequantizer_%d.npy' % (args.exp_name, epoch))
with open('outputs/%s/args_%d.pickle' % (args.exp_name, epoch), 'wb') as f:
pickle.dump(args, f)
print('Val loss: %.4f \t Test loss: %.4f' % (nll_val, nll_test))
print('Best val loss: %.4f \t Best test loss: %.4f' % (best_nll_val, best_nll_test))
wandb.log({"Val loss ": nll_val}, commit=True)
wandb.log({"Test loss ": nll_test}, commit=True)
wandb.log({"Best cross-validated test loss ": best_nll_test}, commit=True)
if __name__ == "__main__":
main()
| 38.977716 | 116 | 0.622811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,641 | 0.188737 |
40186bf606d530f7f4ad08aa9b623b5881609f5c | 230 | py | Python | vise/tests/util/test_string.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
]
| 16 | 2020-07-14T13:14:05.000Z | 2022-03-04T13:39:30.000Z | vise/tests/util/test_string.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
]
| 10 | 2021-03-15T20:47:45.000Z | 2021-08-19T00:47:12.000Z | vise/tests/util/test_string.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
]
| 6 | 2020-03-03T00:42:39.000Z | 2022-02-22T02:34:47.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from vise.util.string import numbers_to_lowercases
def test_numbers_to_lowercases():
assert numbers_to_lowercases("Mg2") == "Mg₂" | 32.857143 | 70 | 0.73913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.452586 |
401926cb60c477135712ef8b53eac69d6cf43064 | 421 | py | Python | code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 4400b219198c14ea0d7d9453cf6d367123b6ce8c | [
"MIT"
]
| 679 | 2016-06-16T22:19:40.000Z | 2022-03-25T19:31:45.000Z | code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 4400b219198c14ea0d7d9453cf6d367123b6ce8c | [
"MIT"
]
| 11 | 2017-04-17T15:25:42.000Z | 2019-11-30T15:58:28.000Z | code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 4400b219198c14ea0d7d9453cf6d367123b6ce8c | [
"MIT"
]
| 199 | 2016-06-21T19:13:47.000Z | 2022-03-25T03:36:54.000Z | def find_accounts(search_text):
# perform search...
if not db_is_available:
return None
# returns a list of account IDs
return db_search(search_text)
def db_search(search_text):
    return [1, 11]
db_is_available = True
accounts = find_accounts('python')
if accounts is None:
    print("Error: DB not available")
else:
    print("Accounts found: Would list them here...")
| 10.268293 | 52 | 0.655582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.294537 |
401988f94a7b7ebda02b1f821bbce411385f8136 | 3,885 | py | Python | pupa/tests/importers/test_base_importer.py | influence-usa/pupa | 5105c39a535ad401f7babe4eecb3861bed1f8326 | [
"BSD-3-Clause"
]
| null | null | null | pupa/tests/importers/test_base_importer.py | influence-usa/pupa | 5105c39a535ad401f7babe4eecb3861bed1f8326 | [
"BSD-3-Clause"
]
| 3 | 2015-06-09T19:22:50.000Z | 2015-06-09T21:41:22.000Z | pupa/tests/importers/test_base_importer.py | influence-usa/pupa | 5105c39a535ad401f7babe4eecb3861bed1f8326 | [
"BSD-3-Clause"
]
| null | null | null | import os
import json
import shutil
import tempfile
import mock
import pytest
from opencivicdata.models import Person
from pupa.scrape import Person as ScrapePerson
from pupa.scrape import Organization as ScrapeOrganization
from pupa.importers.base import omnihash, BaseImporter
from pupa.importers import PersonImporter, OrganizationImporter
from pupa.exceptions import UnresolvedIdError, DataImportError
class FakeImporter(BaseImporter):
_type = 'test'
def test_omnihash_python_types():
# string
assert omnihash('test') == omnihash('test')
# list
assert omnihash(['this', 'is', 'a', 'list']) == omnihash(['this', 'is', 'a', 'list'])
# set
assert omnihash({'and', 'a', 'set'}) == omnihash({'set', 'set', 'and', 'a'})
# dict w/ set and tuple as well
assert (omnihash({'a': {('fancy', 'nested'): {'dict'}}}) ==
omnihash({'a': {('fancy', 'nested'): {'dict'}}}))
def test_import_directory():
# write out some temp data to filesystem
datadir = tempfile.mkdtemp()
dicta = {'test': 'A'}
dictb = {'test': 'B'}
open(os.path.join(datadir, 'test_a.json'), 'w').write(json.dumps(dicta))
open(os.path.join(datadir, 'test_b.json'), 'w').write(json.dumps(dictb))
# simply ensure that import directory calls import_data with all dicts
ti = FakeImporter('jurisdiction-id')
with mock.patch.object(ti, attribute='import_data') as mockobj:
ti.import_directory(datadir)
# import_data should be called once
assert mockobj.call_count == 1
# kind of hacky, get the total list of args passed in
arg_objs = list(mockobj.call_args[0][0])
# 2 args only, make sure a and b are in there
assert len(arg_objs) == 2
assert dicta in arg_objs
assert dictb in arg_objs
# clean up datadir
shutil.rmtree(datadir)
# doing these next few tests just on a Person because it is the same code that handles it
# but for completeness maybe it is better to do these on each type?
@pytest.mark.django_db
def test_deduplication_identical_object():
p1 = ScrapePerson('Dwayne').as_dict()
p2 = ScrapePerson('Dwayne').as_dict()
PersonImporter('jid').import_data([p1, p2])
assert Person.objects.count() == 1
@pytest.mark.django_db
def test_exception_on_identical_objects_in_import_stream():
# these two objects aren't identical, but refer to the same thing
# at the moment we consider this an error (but there may be a better way to handle this?)
o1 = ScrapeOrganization('X-Men', classification='unknown').as_dict()
o2 = ScrapeOrganization('X-Men', founding_date='1970', classification='unknown').as_dict()
with pytest.raises(Exception):
OrganizationImporter('jid').import_data([o1, o2])
@pytest.mark.django_db
def test_resolve_json_id():
p1 = ScrapePerson('Dwayne').as_dict()
p2 = ScrapePerson('Dwayne').as_dict()
pi = PersonImporter('jid')
# do import and get database id
p1_id = p1['_id']
p2_id = p2['_id']
pi.import_data([p1, p2])
db_id = Person.objects.get().id
# simplest case
assert pi.resolve_json_id(p1_id) == db_id
# duplicate should resolve to same id
assert pi.resolve_json_id(p2_id) == db_id
# a null id should map to None
assert pi.resolve_json_id(None) is None
# no such id
with pytest.raises(UnresolvedIdError):
pi.resolve_json_id('this-is-invalid')
@pytest.mark.django_db
def test_invalid_fields():
p1 = ScrapePerson('Dwayne').as_dict()
p1['newfield'] = "shouldn't happen"
with pytest.raises(DataImportError):
PersonImporter('jid').import_data([p1])
@pytest.mark.django_db
def test_invalid_fields_related_item():
p1 = ScrapePerson('Dwayne')
p1.add_link('http://example.com')
p1 = p1.as_dict()
p1['links'][0]['test'] = 3
with pytest.raises(DataImportError):
PersonImporter('jid').import_data([p1])
| 31.585366 | 94 | 0.686486 | 52 | 0.013385 | 0 | 0 | 1,892 | 0.487001 | 0 | 0 | 1,160 | 0.298584 |
401b154f2a06b6253bd915fb79af056b04b243aa | 6,008 | py | Python | packaging/bdist_trinoadmin.py | wgzhao/trino-admin | cd2c71e4d0490cf836a7ddf0dbab69b967408ac8 | [
"Apache-2.0"
]
| null | null | null | packaging/bdist_trinoadmin.py | wgzhao/trino-admin | cd2c71e4d0490cf836a7ddf0dbab69b967408ac8 | [
"Apache-2.0"
]
| 2 | 2021-10-19T05:37:09.000Z | 2022-03-29T22:07:21.000Z | packaging/bdist_trinoadmin.py | wgzhao/trino-admin | cd2c71e4d0490cf836a7ddf0dbab69b967408ac8 | [
"Apache-2.0"
]
| 1 | 2021-12-27T02:38:32.000Z | 2021-12-27T02:38:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from distutils import log as logger
from distutils.dir_util import remove_tree
import pip
try:
from setuptools import Command
except ImportError:
from distutils.core import Command
from packaging import package_dir
class bdist_trinoadmin(Command):
description = 'create a distribution for trino-admin'
user_options = [('bdist-dir=', 'b',
'temporary directory for creating the distribution'),
('dist-dir=', 'd',
'directory to put final built distributions in'),
('virtualenv-version=', None,
'version of virtualenv to download'),
('keep-temp', 'k',
'keep the pseudo-installation tree around after ' +
'creating the distribution archive'),
('online-install', None, 'boolean flag indicating if ' +
'the installation should pull dependencies from the ' +
'Internet or use the ones supplied in the third party ' +
'directory'),
('plat-name=', 'p',
'platform name to embed in generated filenames' +
'(default: linux_x86_64)')
]
default_virtualenv_version = '12.0.7'
NATIVE_WHEELS = ['pycrypto-2.6.1-{0}-none-linux_x86_64.whl', 'twofish-0.3.0-{0}-none-linux_x86_64.whl']
def build_wheel(self, build_dir):
cmd = self.reinitialize_command('bdist_wheel')
cmd.dist_dir = build_dir
self.run_command('bdist_wheel')
cmd.compression = 'deflated'
# Ensure that you get the finalized archive name
cmd.finalize_options()
# wheel_name = cmd.get_archive_basename()
# logger.info('creating %s in %s', wheel_name + '.whl', build_dir)
return ""
def generate_install_script(self, wheel_name, build_dir):
with open(os.path.join(package_dir, 'install-trinoadmin.template'), 'r') as template:
with open(os.path.join(build_dir, 'install-trinoadmin.sh'), 'w') as install_script_file:
install_script = self._fill_in_template(template.readlines(), wheel_name)
install_script_file.write(install_script)
os.chmod(os.path.join(build_dir, 'install-trinoadmin.sh'), 0o755)
def _fill_in_template(self, template_lines, wheel_name):
if self.online_install:
extra_install_args = ''
else:
extra_install_args = '--no-index --find-links third-party'
filled_in = [self._replace_template_values(line, wheel_name, extra_install_args) for line in template_lines]
return ''.join(filled_in)
def _replace_template_values(self, line, wheel_name, extra_install_args):
line = re.sub(r'%ONLINE_OR_OFFLINE_INSTALL%', extra_install_args, line)
line = re.sub(r'%WHEEL_NAME%', wheel_name, line)
line = re.sub(r'%VIRTUALENV_VERSION%', self.virtualenv_version, line)
return line
def package_dependencies(self, build_dir):
thirdparty_dir = os.path.join(build_dir, 'third-party')
requirements = self.distribution.install_requires
for requirement in requirements:
pip.main(['wheel',
'--wheel-dir={0}'.format(thirdparty_dir),
'--no-cache',
requirement])
pip.main(['download',
'-d',
thirdparty_dir,
'--no-cache-dir',
'--no-binary',
':all:',
'virtualenv=={0}'.format(self.virtualenv_version)])
def archive_dist(self, build_dir, dist_dir):
archive_basename = self.distribution.get_fullname()
if self.online_install:
archive_basename += '-online'
else:
archive_basename += '-offline'
archive_file = os.path.join(dist_dir, archive_basename)
self.mkpath(os.path.dirname(archive_file))
self.make_archive(archive_file, 'gztar',
root_dir=os.path.dirname(build_dir),
base_dir=os.path.basename(build_dir))
logger.info('created %s.tar.gz', archive_file)
def run(self):
build_dir = self.bdist_dir
self.mkpath(build_dir)
wheel_name = self.build_wheel(build_dir)
self.generate_install_script(wheel_name, build_dir)
if not self.online_install:
self.package_dependencies(build_dir)
self.archive_dist(build_dir, self.dist_dir)
if not self.keep_temp:
remove_tree(build_dir)
def initialize_options(self):
self.bdist_dir = None
self.dist_dir = None
self.virtualenv_url_base = None
self.virtualenv_version = None
self.keep_temp = False
self.online_install = False
self.plat_name = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, self.distribution.get_name())
if self.dist_dir is None:
self.dist_dir = 'dist'
if self.virtualenv_version is None:
self.virtualenv_version = self.default_virtualenv_version
self.plat_name_supplied = self.plat_name is not None
| 37.55 | 116 | 0.621172 | 5,166 | 0.859854 | 0 | 0 | 0 | 0 | 0 | 0 | 1,794 | 0.298602 |
401bf5f8c246403323fb3816b89f804ced1d9820 | 547 | py | Python | 2020-05-month-long-challenge/day22.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
]
| null | null | null | 2020-05-month-long-challenge/day22.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
]
| null | null | null | 2020-05-month-long-challenge/day22.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
]
| null | null | null | #!/usr/bin/env python3
# Day 22: Sort Characters By Frequency
#
# Given a string, sort it in decreasing order based on the frequency of
# characters.
import collections
class Solution:
def frequencySort(self, s: str) -> str:
return "".join(map(
lambda t: t[0] * t[1],
collections.Counter(s).most_common(len(s))))
# Tests
assert Solution().frequencySort("tree") in ["eert", "eetr"]
assert Solution().frequencySort("cccaaa") in ["cccaaa", "aaaccc"]
assert Solution().frequencySort("Aabb") in ["bbAa", "bbaA"]
| 27.35 | 71 | 0.659963 | 179 | 0.327239 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.391225 |
401c85c8336927c2f23953dd8bb76eb17a0d8316 | 1,877 | py | Python | loc.py | relax-space/pandas-first | c8aceae09263a9566ef7dc7631e27f25d569aad4 | [
"Apache-2.0"
]
| null | null | null | loc.py | relax-space/pandas-first | c8aceae09263a9566ef7dc7631e27f25d569aad4 | [
"Apache-2.0"
]
| null | null | null | loc.py | relax-space/pandas-first | c8aceae09263a9566ef7dc7631e27f25d569aad4 | [
"Apache-2.0"
]
| null | null | null | '''
Notes: loc and iloc serve several purposes
1. Select one row or multiple rows
2. Select one column or multiple columns
3. Select the value of a single cell
For a DataFrame, when index and columns are not specified, iloc and loc behave the same.
The difference: iloc selects by positional index, while loc selects by index label.
'''
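# For example, with pd.DataFrame([[1, 2], [3, 4]]) and the default integer labels:
#   df.iloc[0:2] selects rows by position (end-exclusive),
#   df.loc[0:1]  selects rows by label (end-inclusive),
# and both return the same two rows here; the tests below walk through each case.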
import numpy as np
import pandas as pd
def test_1():
    # Select rows
pf = pd.DataFrame([[1, 2], [3, 4]])
iloc_0 = pf.iloc[0]
loc_0 = pf.loc[0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc error'
assert [1, 2
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc 2 error'
    # Note the difference below: positional indices (iloc) vs index labels (loc)
iloc_01 = pf.iloc[0:2]
loc_01 = pf.loc[0:1]
assert [[1, 2], [
3, 4
]] == iloc_01.values.tolist() == loc_01.values.tolist(), 'loc 3 error'
def test_2():
    # Select columns
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
iloc_0 = df.iloc[:, 0]
loc_0 = df.loc[:, 0]
assert pd.Series == type(iloc_0) == type(loc_0), 'loc2 1 error'
assert [
1, 4
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc2 2 error'
loc_01 = df.loc[:, 0:1]
assert pd.DataFrame == type(loc_01), 'loc2 3 error'
assert [[1, 2], [4, 5]] == loc_01.values.tolist(), 'loc2 4 error'
def test_3():
    # Select a single cell
df = pd.DataFrame([[1, 2], [3, 4]])
iloc_00 = df.iloc[0, 0]
loc_00 = df.loc[0, 0]
assert np.int64 == type(iloc_00) == type(loc_00), 'loc3 1 error'
assert 1.0 == iloc_00 == loc_00, 'loc3 2 error'
def test_4():
    # Difference between loc and iloc once index and columns are set
df = pd.DataFrame([[1, 2], [3, 4]],
index=['day1', 'day2'],
columns=['grape', 'pineapple'])
    # First row
iloc_0 = df.iloc[0]
loc_0 = df.loc['day1']
assert [
1, 2
] == iloc_0.values.tolist() == loc_0.values.tolist(), 'loc4 1 error'
    # First column
iloc_col_0 = df.iloc[:, 0]
loc_col_0 = df.loc[:, 'grape']
assert [1, 3] == iloc_col_0.values.tolist() == loc_col_0.values.tolist(
), 'loc4 2 error'
| 26.069444 | 79 | 0.559403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 679 | 0.319229 |
401e8c47a022914e9d9cdffe16372061e6ecc752 | 4,673 | py | Python | checkproject/runner.py | perror/checkproject | 9321470164e010778d32e24dc77c0b28eccd9429 | [
"BSD-3-Clause"
]
| null | null | null | checkproject/runner.py | perror/checkproject | 9321470164e010778d32e24dc77c0b28eccd9429 | [
"BSD-3-Clause"
]
| null | null | null | checkproject/runner.py | perror/checkproject | 9321470164e010778d32e24dc77c0b28eccd9429 | [
"BSD-3-Clause"
]
| null | null | null | """Runner to discover, run and collect the results of all the checks."""
def import_module(module_path):
"""Import a Python file as a module in the current context.
@param module_path: Path to the Python file.
@return: A reference to the module once loaded.
"""
import os
import sys
module_filename = module_path.split(os.sep)[-1]
    if sys.version_info[0] >= 3:
        if sys.version_info[1] >= 5:  # version_info handles e.g. Python 3.10 correctly
# Running a Python 3.5+ version
from importlib.util import spec_from_file_location, module_from_spec
spec = spec_from_file_location(module_filename, module_path)
module = module_from_spec(spec)
spec.loader.exec_module(module)
else:
# Running a Python <= 3.4 version
from importlib.machinery import SourceFileLoader
module = SourceFileLoader(module_filename, module_path).load_module()
else:
# Running a Python 2 version
import imp
module = imp.load_source(module_filename, module_path)
return module
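# Example (hypothetical path): the returned object behaves like a normally imported
# module, e.g.
#
#     checks = import_module("/path/to/checks/check_style.py")
#     checker = checks.CheckStyle(project_dir)   # CheckStyle is a hypothetical check class
#
# regardless of which loading branch above was taken.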
class CheckRunner(object):
"""A class to discover all the checks, run it sequentially and collect
all the results.
"""
def __init__(self, project_dir, checks_dir):
"""Initialize the default runner class.
@param project_dir: Root directory where to find the source
files of the tested project.
@param checks_dir: Root directory where to find are all the
checks.
"""
self.project_dir = project_dir
self.checks_dir = checks_dir
self.checks = None
def discover(self, pattern='check_*.py', top_dir=None):
"""Discover all the checks in the directory 'top_dir' with all methods
matching the given pattern 'pattern' and update the list of checks.
@param pattern: Prefix pattern of the methods for all
checks.
"""
from checkproject.utils import remove_prefix
import os
import fnmatch
if top_dir is None:
top_dir = self.checks_dir
# List of all the check files detected
check_paths = []
# Scanning all files and subdirectories in breadth-first
for path, _, files in os.walk(os.path.abspath(top_dir)):
for filename in fnmatch.filter(files, pattern):
check_paths.append(remove_prefix(os.path.join(path, filename),
self.checks_dir))
# Initialize self.checks
if self.checks is None:
self.checks = []
# Update self.checks
self.checks = sorted(set(self.checks + check_paths))
def list(self, pattern='Check*'):
"""List all the checks discovered in the order of execution.
@return: A list of all the checks ordered as for executing it.
"""
import os
import re
# Initializing self.checks if needed
if self.checks is None:
self.discover()
# Initializing return value
checks = []
# Scanning all the modules
for check_module in self.checks:
module_path = os.path.join(self.checks_dir, check_module)
module_name = module_path.split(os.sep)[-1].split('.')[0]
module = import_module(module_path)
# Extract all the 'Check' classes
classes = [cls for cls in dir(module)
                       if re.compile(pattern).search(cls) and cls != 'CheckCase']
for class_name in classes:
cls = getattr(module, class_name)
check = cls(self.project_dir)
checks += [module_name + '.' + cls.__name__ + '.' + m
for m in check.list()]
return checks
def run(self, pattern='Check*'):
"""Execute the checks and collect all the results"""
import os
import re
# Initializing self.checks if needed
if self.checks is None:
self.discover()
# Initializing return value
result = None
# Scanning all the modules
for check_module in self.checks:
module_path = os.path.join(self.checks_dir, check_module)
module = import_module(module_path)
# Extract all the 'Check' classes
classes = [cls for cls in dir(module)
                       if re.compile(pattern).search(cls) and cls != 'CheckCase']
for class_name in classes:
cls = getattr(module, class_name)
check = cls(self.project_dir)
result = check.run(result)
return result
| 32.227586 | 85 | 0.596833 | 3,599 | 0.770169 | 0 | 0 | 0 | 0 | 0 | 0 | 1,554 | 0.332549 |
401fd2803f10b2fab1010a7dfe0776cbe8cc8571 | 11,612 | py | Python | neutron_fwaas/extensions/firewall_v2.py | sapcc/neutron-fwaas | 59bad17387d15f86ea7d08f8675208160a999ffe | [
"Apache-2.0"
]
| null | null | null | neutron_fwaas/extensions/firewall_v2.py | sapcc/neutron-fwaas | 59bad17387d15f86ea7d08f8675208160a999ffe | [
"Apache-2.0"
]
| null | null | null | neutron_fwaas/extensions/firewall_v2.py | sapcc/neutron-fwaas | 59bad17387d15f86ea7d08f8675208160a999ffe | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from debtcollector import moves
from neutron.api.v2 import resource_helper
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import firewall_v2
from neutron_lib.api import extensions
from neutron_lib.exceptions import firewall_v2 as f_exc
from neutron_lib.services import base as service_base
from oslo_config import cfg
import six
from neutron_fwaas._i18n import _
from neutron_fwaas.common import fwaas_constants
FirewallGroupNotFound = moves.moved_class(
f_exc.FirewallGroupNotFound, 'FirewallGroupNotFound', __name__)
FirewallGroupInUse = moves.moved_class(
f_exc.FirewallGroupInUse, 'FirewallGroupInUse', __name__)
FirewallGroupInPendingState = moves.moved_class(
f_exc.FirewallGroupInPendingState, 'FirewallGroupInPendingState', __name__)
FirewallGroupPortInvalid = moves.moved_class(
f_exc.FirewallGroupPortInvalid, 'FirewallGroupPortInvalid', __name__)
FirewallGroupPortInvalidProject = moves.moved_class(
f_exc.FirewallGroupPortInvalidProject, 'FirewallGroupPortInvalidProject',
__name__)
FirewallGroupPortInUse = moves.moved_class(
f_exc.FirewallGroupPortInUse, 'FirewallGroupPortInUse', __name__)
FirewallPolicyNotFound = moves.moved_class(
f_exc.FirewallPolicyNotFound, 'FirewallPolicyNotFound', __name__)
FirewallPolicyInUse = moves.moved_class(
f_exc.FirewallPolicyInUse, 'FirewallPolicyInUse', __name__)
FirewallPolicyConflict = moves.moved_class(
f_exc.FirewallPolicyConflict, 'FirewallPolicyConflict', __name__)
FirewallRuleSharingConflict = moves.moved_class(
f_exc.FirewallRuleSharingConflict, 'FirewallRuleSharingConflict',
__name__)
FirewallPolicySharingConflict = moves.moved_class(
f_exc.FirewallPolicySharingConflict, 'FirewallPolicySharingConflict',
__name__)
FirewallRuleNotFound = moves.moved_class(
f_exc.FirewallRuleNotFound, 'FirewallRuleNotFound', __name__)
FirewallRuleInUse = moves.moved_class(
f_exc.FirewallRuleInUse, 'FirewallRuleInUse', __name__)
FirewallRuleNotAssociatedWithPolicy = moves.moved_class(
f_exc.FirewallRuleNotAssociatedWithPolicy,
'FirewallRuleNotAssociatedWithPolicy',
__name__)
FirewallRuleInvalidProtocol = moves.moved_class(
f_exc.FirewallRuleInvalidProtocol, 'FirewallRuleInvalidProtocol',
__name__)
FirewallRuleInvalidAction = moves.moved_class(
f_exc.FirewallRuleInvalidAction, 'FirewallRuleInvalidAction',
__name__)
FirewallRuleInvalidICMPParameter = moves.moved_class(
f_exc.FirewallRuleInvalidICMPParameter,
'FirewallRuleInvalidICMPParameter', __name__)
FirewallRuleWithPortWithoutProtocolInvalid = moves.moved_class(
f_exc.FirewallRuleWithPortWithoutProtocolInvalid,
'FirewallRuleWithPortWithoutProtocolInvalid', __name__)
FirewallRuleInvalidPortValue = moves.moved_class(
f_exc.FirewallRuleInvalidPortValue, 'FirewallRuleInvalidPortValue',
__name__)
FirewallRuleInfoMissing = moves.moved_class(
f_exc.FirewallRuleInfoMissing, 'FirewallRuleInfoMissing', __name__)
FirewallIpAddressConflict = moves.moved_class(
f_exc.FirewallIpAddressConflict, 'FirewallIpAddressConflict', __name__)
FirewallInternalDriverError = moves.moved_class(
f_exc.FirewallInternalDriverError, 'FirewallInternalDriverError', __name__)
FirewallRuleConflict = moves.moved_class(
f_exc.FirewallRuleConflict, 'FirewallRuleConflict', __name__)
FirewallRuleAlreadyAssociated = moves.moved_class(
f_exc.FirewallRuleAlreadyAssociated, 'FirewallRuleAlreadyAssociated',
__name__)
default_fwg_rules_opts = [
cfg.StrOpt('ingress_action',
default=api_const.FWAAS_DENY,
help=_('Firewall group rule action allow or '
'deny or reject for ingress. '
'Default is deny.')),
cfg.StrOpt('ingress_source_ipv4_address',
default=None,
help=_('IPv4 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_ipv6_address',
default=None,
help=_('IPv6 source address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for ingress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('ingress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for ingress '
'(address or address/netmask). '
                      'Default is None.')),
cfg.StrOpt('ingress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for ingress. '
'Default is None.')),
cfg.StrOpt('egress_action',
default=api_const.FWAAS_ALLOW,
help=_('Firewall group rule action allow or '
'deny or reject for egress. '
'Default is allow.')),
cfg.StrOpt('egress_source_ipv4_address',
default=None,
help=_('IPv4 source address for egress '
'(address or address/netmask). '
'Default is None.')),
cfg.StrOpt('egress_source_ipv6_address',
default=None,
help=_('IPv6 source address for egress '
'(address or address/netmask). '
                      'Default is None.')),
cfg.StrOpt('egress_source_port',
default=None,
help=_('Source port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.StrOpt('egress_destination_ipv4_address',
default=None,
help=_('IPv4 destination address for egress '
'(address or address/netmask). '
                      'Default is None.')),
cfg.StrOpt('egress_destination_ipv6_address',
default=None,
help=_('IPv6 destination address for egress '
'(address or address/netmask). '
                      'Default is None.')),
cfg.StrOpt('egress_destination_port',
default=None,
help=_('Destination port number or range '
'(min:max) for egress. '
'Default is None.')),
cfg.BoolOpt('shared',
default=False,
help=_('Firewall group rule shared. '
'Default is False.')),
cfg.StrOpt('protocol',
default=None,
help=_('Network protocols (tcp, udp, ...). '
'Default is None.')),
cfg.BoolOpt('enabled',
default=True,
help=_('Firewall group rule enabled. '
'Default is True.')),
]
firewall_quota_opts = [
cfg.IntOpt('quota_firewall_group',
default=10,
help=_('Number of firewall groups allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_policy',
default=10,
help=_('Number of firewall policies allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_rule',
default=100,
help=_('Number of firewall rules allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(default_fwg_rules_opts, 'default_fwg_rules')
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')
# TODO(Reedip): Remove the convert_to functionality after bug1706061 is fixed.
def convert_to_string(value):
if value is not None:
return str(value)
return None
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'source_port']['convert_to'] = convert_to_string
firewall_v2.RESOURCE_ATTRIBUTE_MAP[api_const.FIREWALL_RULES][
'destination_port']['convert_to'] = convert_to_string
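# Effect of the conversion above, for illustration: a rule posted with an integer
# port such as {"source_port": 443} is normalized to the string "443" (None is
# left untouched) before the API layer validates it.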
class Firewall_v2(extensions.APIExtensionDescriptor):
api_definition = firewall_v2
@classmethod
def get_resources(cls):
special_mappings = {'firewall_policies': 'firewall_policy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP)
return resource_helper.build_resource_info(
plural_mappings, firewall_v2.RESOURCE_ATTRIBUTE_MAP,
fwaas_constants.FIREWALL_V2, action_map=firewall_v2.ACTION_MAP,
register_quota=True)
@classmethod
def get_plugin_interface(cls):
return Firewallv2PluginBase
@six.add_metaclass(abc.ABCMeta)
class Firewallv2PluginBase(service_base.ServicePluginBase):
def get_plugin_type(self):
return fwaas_constants.FIREWALL_V2
def get_plugin_description(self):
return 'Firewall Service v2 Plugin'
# Firewall Group
@abc.abstractmethod
def create_firewall_group(self, context, firewall_group):
pass
@abc.abstractmethod
def delete_firewall_group(self, context, id):
pass
@abc.abstractmethod
def get_firewall_group(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_groups(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_group(self, context, id, firewall_group):
pass
# Firewall Policy
@abc.abstractmethod
def create_firewall_policy(self, context, firewall_policy):
pass
@abc.abstractmethod
def delete_firewall_policy(self, context, id):
pass
@abc.abstractmethod
def get_firewall_policy(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_policies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_policy(self, context, id, firewall_policy):
pass
# Firewall Rule
@abc.abstractmethod
def create_firewall_rule(self, context, firewall_rule):
pass
@abc.abstractmethod
def delete_firewall_rule(self, context, id):
pass
@abc.abstractmethod
def get_firewall_rule(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_rules(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def update_firewall_rule(self, context, id, firewall_rule):
pass
@abc.abstractmethod
def insert_rule(self, context, id, rule_info):
pass
@abc.abstractmethod
def remove_rule(self, context, id, rule_info):
pass
| 38.323432 | 79 | 0.673527 | 2,602 | 0.224079 | 0 | 0 | 2,536 | 0.218395 | 0 | 0 | 3,529 | 0.30391 |
40202bd57c8aba134557450b58ae36c3239d01dd | 4,345 | py | Python | model_hub/model_hub/mmdetection/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
]
| null | null | null | model_hub/model_hub/mmdetection/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
]
| null | null | null | model_hub/model_hub/mmdetection/utils.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
]
| null | null | null | """
Various utility functions for using mmdetection in Determined that may be useful
even if not using the provided MMDetTrial.
build_fp16_loss_scaler is large derived from the original mmcv code at
https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py
mmcv is covered by the Apache 2.0 License. Copyright (c) OpenMMLab. All rights reserved.
"""
import os
from typing import Any, Dict, Tuple
import mmcv
import torch
import model_hub.utils
def get_config_pretrained_url_mapping() -> Dict[str, str]:
"""
Walks the MMDETECTION_CONFIG_DIR and creates a mapping of configs
to urls for pretrained checkpoints. The url for pretrained checkpoints
are parsed from the README files in each of the mmdetection config folders.
MMDETECTION_CONFIG_DIR is set to /mmdetection/configs in the default
determinedai/model-hub-mmdetection docker image.
"""
models = {}
config_dir = os.getenv("MMDETECTION_CONFIG_DIR")
if config_dir:
for root, _, files in os.walk(config_dir):
for f in files:
if "README" in f:
with open(os.path.join(root, f), "r") as readme:
lines = readme.readlines()
for line in lines:
if "[config]" in line:
start = line.find("[config]")
end = line.find(".py", start)
start = line.rfind("/", start, end)
config_name = line[start + 1 : end + 3]
start = line.find("[model]")
end = line.find(".pth", start)
ckpt_name = line[start + 8 : end + 4]
models[config_name] = ckpt_name
return models
CONFIG_TO_PRETRAINED = get_config_pretrained_url_mapping()
def get_pretrained_ckpt_path(download_directory: str, config_file: str) -> Tuple[Any, Any]:
"""
If the config_file has an associated pretrained checkpoint,
return path to downloaded checkpoint and preloaded checkpoint
Arguments:
download_directory: path to download checkpoints to
config_file: mmdet config file path for which to find and load pretrained weights
Returns:
checkpoint path, loaded checkpoint
"""
config_file = config_file.split("/")[-1]
if config_file in CONFIG_TO_PRETRAINED:
ckpt_path = model_hub.utils.download_url(
download_directory, CONFIG_TO_PRETRAINED[config_file]
)
return ckpt_path, torch.load(ckpt_path) # type: ignore
return None, None
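# Example (hypothetical config name):
#
#     ckpt_path, ckpt = get_pretrained_ckpt_path(
#         "/tmp/ckpts", "configs/retinanet/retinanet_r50_fpn_1x_coco.py")
#
# Only the trailing file name is used for the lookup, and (None, None) is returned
# when no pretrained checkpoint is listed for that config in the README mapping.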
def build_fp16_loss_scaler(loss_scale: mmcv.Config) -> Any:
"""
    This function is derived from mmcv, which is covered by the Apache 2.0 License.
Copyright (c) OpenMMLab. All rights reserved.
Arguments:
loss_scale (float | str | dict): Scale factor configuration.
If loss_scale is a float, static loss scaling will be used with
the specified scale. If loss_scale is a string, it must be
'dynamic', then dynamic loss scaling will be used.
It can also be a dict containing arguments of GradScalar.
Defaults to 512. For PyTorch >= 1.6, mmcv uses official
implementation of GradScaler. If you use a dict version of
loss_scale to create GradScaler, please refer to:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
for the parameters.
Examples:
>>> loss_scale = dict(
... init_scale=65536.0,
... growth_factor=2.0,
... backoff_factor=0.5,
... growth_interval=2000
... )
"""
if loss_scale == "dynamic":
loss_scaler = torch.cuda.amp.GradScaler() # type: ignore
elif isinstance(loss_scale, float):
loss_scaler = torch.cuda.amp.GradScaler(init_scale=loss_scale) # type: ignore
elif isinstance(loss_scale, dict):
loss_scaler = torch.cuda.amp.GradScaler(**loss_scale) # type: ignore
else:
raise Exception(
"Cannot parse fp16 configuration. Expected cfg to be str(dynamic), float or dict."
)
return loss_scaler
| 40.990566 | 95 | 0.61519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,422 | 0.557422 |
40203044d0b70862532fc8cce70af574c829a8d8 | 2,465 | py | Python | gcloud/datastores/tests/STUB_test_bigquery.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
]
| 2 | 2017-03-01T20:09:06.000Z | 2019-02-08T17:10:16.000Z | gcloud/datastores/tests/STUB_test_bigquery.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
]
| 40 | 2015-10-10T15:02:21.000Z | 2020-03-17T22:32:04.000Z | gcloud/datastores/tests/STUB_test_bigquery.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
]
| 2 | 2018-11-14T21:50:58.000Z | 2022-03-07T20:59:27.000Z | """tests bigquery client"""
import unittest
from gcloud.datastores.bigquery import BigqueryClient
class BigqueryClientTestCases(unittest.TestCase):
"""stuff"""
@classmethod
def setUpClass(cls):
cls.project_id = 'test'
cls.dataset_id = 'etl_test'
cls.table_id = 'etl_test'
cls.table_schema = {
"fields": [
{
"type": "STRING",
"name": "a_key",
"mode": "REQUIRED",
}
]
}
cls.rows = [
{
"insertId": "some_uuid",
"json": {
"a_key": "a_value"
},
},
]
cls.query = "SELECT a_key FROM [{}:{}.{}]".format(
cls.project_id,
cls.dataset_id,
cls.table_id,
)
cls.client = BigqueryClient(
project_name=cls.project_id,
dataset_id=cls.dataset_id
)
# Create a dataset and table (this indirectly tests create and delete)
cls.client.insert_dataset(cls.dataset_id)
cls.client.insert_table(
table_id=cls.table_id,
schema=cls.table_schema
)
@classmethod
def tearDownClass(cls):
# Remove table and dataset (this indirectly tests create and delete)
cls.client.delete_table(cls.table_id)
cls.client.delete_dataset(cls.dataset_id)
def test_get_dataset(self):
self.client.get_dataset(self.dataset_id)
def test_get_table(self):
self.client.get_table(self.table_id)
def test_insert_data(self):
self.client.insert_data(
table_id=self.table_id,
rows=self.rows
)
def test_list_data(self):
self.client.list_data(
table_id=self.table_id
)
def test_list_datasets(self):
self.client.list_datasets()
def test_list_tables(self):
self.client.list_tables(
dataset_id=self.dataset_id
)
def test_patch_table(self):
self.client.patch_table(
table_id=self.table_id,
schema=self.table_schema,
)
def test_query(self):
self.client.query(
query=self.query,
)
def test_update_table(self):
self.client.update_table(
table_id=self.table_id,
schema=self.table_schema,
)
| 22.409091 | 78 | 0.539959 | 2,363 | 0.958621 | 0 | 0 | 1,291 | 0.523732 | 0 | 0 | 326 | 0.132252 |
40219219083fe79c8f213a75f899041ef2518cf2 | 354 | py | Python | filter_hash.py | mbougarne/python-algos | f05c491903dfce95ee134852252c55c2cee1b07a | [
"MIT"
]
| null | null | null | filter_hash.py | mbougarne/python-algos | f05c491903dfce95ee134852252c55c2cee1b07a | [
"MIT"
]
| null | null | null | filter_hash.py | mbougarne/python-algos | f05c491903dfce95ee134852252c55c2cee1b07a | [
"MIT"
]
| null | null | null | fruits = ["orange", "banana", "apple", "avocado", "kiwi", "apricot",
"cherry", "grape", "coconut", "lemon", "mango", "peach",
"pear", "strawberry", "pineapple", "apple", "orange", "pear",
"grape", "banana"
]
filters = dict()
for key in fruits:
filters[key] = 1
result = set(filters.keys())
print(result) | 27.230769 | 73 | 0.536723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.443503 |
4022d54aeba2badfe2c92ef3c771f491343dff82 | 1,919 | py | Python | teste/knn.py | joandesonandrade/nebulosa | 5bc157322ed0bdb81f6f00f6ed1ea7f7a5cadfe0 | [
"MIT"
]
| null | null | null | teste/knn.py | joandesonandrade/nebulosa | 5bc157322ed0bdb81f6f00f6ed1ea7f7a5cadfe0 | [
"MIT"
]
| null | null | null | teste/knn.py | joandesonandrade/nebulosa | 5bc157322ed0bdb81f6f00f6ed1ea7f7a5cadfe0 | [
"MIT"
]
| null | null | null | from sklearn import preprocessing
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
#Opening the data as a DataFrame
dados = pd.read_csv('dados/001.csv')
#Initializing the binarizer for the class: yes=1; no=0
pre = preprocessing.LabelBinarizer()
#Binarizing the 'jogou' (played) class and assigning it to an n-dimensional array
y_binary = pre.fit_transform(dados['jogou'])
y = np.array(y_binary).ravel()
lista_clima = [x for x in dados['clima']]
lista_temperatura = [x for x in dados['temperatura']]
lista_jogou = [x for x in dados['jogou']]
pre = preprocessing.LabelEncoder()
clima_encoding = pre.fit_transform(lista_clima)
temperatura_encoding = pre.fit_transform(lista_temperatura)
jogou_encoding = pre.fit_transform(lista_jogou)
lista = list(zip(clima_encoding, temperatura_encoding, jogou_encoding))
X = np.array(lista, dtype=np.int32)
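#A quick illustration of what the encoders above produce (actual values depend on the CSV):
#   LabelBinarizer: ["sim", "nao", "sim"]       -> [[1], [0], [1]]
#   LabelEncoder:   ["sol", "chuva", "nublado"] -> [2, 0, 1]  (alphabetical integer codes)
#so X is an integer matrix with one row per sample and y a flat 0/1 vector.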
#colunas = ['A', 'B', 'C']
# print(pd.DataFrame(X, columns=colunas, dtype=np.int32))
# print(pd.DataFrame(y, columns=['Classe'], dtype=np.int32))
#
# xX = []
# for i, x in enumerate(X):
# xX.append([list(x), y[i][0]])
#
# dX = [(x[0][0] + x[0][1] + x[0][2]) for x in xX]
# dY = [x[1] for x in xX]
#
# print('Soma dos rótulos:', dX)
# print('Classe:', dY)
#
# fig, ax = plt.subplots()
# ax.plot(dX)
# ax.plot(dY)
# plt.show()
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
#Splitting the data: 75% for training and 25% for testing; I always use this split :)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=0)
#Building the model, keeping the default parameters
knn = KNeighborsClassifier()
#Training the model
knn.fit(X=X_train, y=y_train)
#Evaluating the model's accuracy on the test data
pontuacao = str(accuracy_score(y_test, knn.predict(X_test)) * 100)
print("Precisão: "+pontuacao+"%")
| 28.641791 | 105 | 0.727983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 896 | 0.464249 |
40255e51d495409353d842161452761a11a4b039 | 8,940 | py | Python | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | m-mayran/pipelines | 4e89973504980ff89d896fda09fc29a339b2d744 | [
"Apache-2.0"
]
| null | null | null | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | m-mayran/pipelines | 4e89973504980ff89d896fda09fc29a339b2d744 | [
"Apache-2.0"
]
| null | null | null | components/google-cloud/tests/container/experimental/gcp_launcher/test_batch_prediction_job_remote_runner.py | m-mayran/pipelines | 4e89973504980ff89d896fda09fc29a339b2d744 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Vertex AI Batch Prediction Job Remote Runner Client module."""
import json
import os
import time
import unittest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
from google.protobuf import json_format
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.experimental.gcp_launcher import batch_prediction_job_remote_runner
from google_cloud_pipeline_components.container.experimental.gcp_launcher import job_remote_runner
class BatchPredictionJobRemoteRunnerUtilsTests(unittest.TestCase):
def setUp(self):
super(BatchPredictionJobRemoteRunnerUtilsTests, self).setUp()
self._payload = (
'{"batchPredictionJob": {"displayName": '
'"BatchPredictionComponentName", "model": '
'"projects/test/locations/test/models/test-model","inputConfig":'
' {"instancesFormat": "CSV","gcsSource": {"uris": '
'["test_gcs_source"]}}, "outputConfig": {"predictionsFormat": '
'"CSV", "gcsDestination": {"outputUriPrefix": '
'"test_gcs_destination"}}}}')
self._job_type = 'BatchPredictionJob'
self._project = 'test_project'
self._location = 'test_region'
        self._batch_prediction_job_name = f'/projects/{self._project}/locations/{self._location}/jobs/test_job_id'
self._gcp_resources_path = 'gcp_resources'
self._batch_prediction_job_uri_prefix = f'https://{self._location}-aiplatform.googleapis.com/v1/'
def tearDown(self):
if os.path.exists(self._gcp_resources_path):
os.remove(self._gcp_resources_path)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
def test_batch_prediction_job_remote_runner_on_region_is_set_correctly_in_client_options(
self, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
mock_job_service_client.assert_called_once_with(
client_options={
'api_endpoint': 'test_region-aiplatform.googleapis.com'
},
client_info=mock.ANY)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_on_payload_deserializes_correctly(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
expected_parent = f'projects/{self._project}/locations/{self._location}'
expected_job_spec = json.loads(self._payload, strict=False)
job_client.create_batch_prediction_job.assert_called_once_with(
parent=expected_parent, batch_prediction_job=expected_job_spec)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_raises_exception_on_error(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_FAILED
mock_path_exists.return_value = False
with self.assertRaises(RuntimeError):
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_batch_prediction_job_remote_runner_retries_to_get_status_on_non_completed_job(
self, mock_time_sleep, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response_success = mock.Mock()
get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
get_batch_prediction_job_response_running = mock.Mock()
get_batch_prediction_job_response_running.state = gca_job_state.JobState.JOB_STATE_RUNNING
job_client.get_batch_prediction_job.side_effect = [
get_batch_prediction_job_response_running,
get_batch_prediction_job_response_success
]
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
mock_time_sleep.assert_called_once_with(
job_remote_runner._POLLING_INTERVAL_IN_SECONDS)
self.assertEqual(job_client.get_batch_prediction_job.call_count, 2)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_returns_gcp_resources(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response_success = mock.Mock()
get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
job_client.get_batch_prediction_job.side_effect = [
get_batch_prediction_job_response_success
]
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources_path)
with open(self._gcp_resources_path) as f:
serialized_gcp_resources = f.read()
# Instantiate GCPResources Proto
batch_prediction_job_resources = json_format.Parse(
serialized_gcp_resources, GcpResources())
self.assertEqual(len(batch_prediction_job_resources.resources), 1)
batch_prediction_job_name = batch_prediction_job_resources.resources[
0].resource_uri[len(self._batch_prediction_job_uri_prefix):]
self.assertEqual(batch_prediction_job_name,
self._batch_prediction_job_name)
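# Editor addition (not in the original file): lets the module be executed directly,
# e.g. `python test_batch_prediction_job_remote_runner.py`, instead of via a test runner.
if __name__ == '__main__':
    unittest.main()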
| 45.380711 | 115 | 0.794519 | 7,697 | 0.860962 | 0 | 0 | 6,616 | 0.740045 | 0 | 0 | 1,459 | 0.163199 |
4027f13cd3b7d7bc0f1afe366ba2f0949bed351e | 805 | py | Python | rotypes/Windows/Storage/Streams/__init__.py | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | ["MIT"] | 18 | 2022-03-18T08:20:28.000Z | 2022-03-31T15:19:15.000Z | rotypes/Windows/Storage/Streams/__init__.py | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | ["MIT"] | 4 | 2021-01-23T13:05:39.000Z | 2021-09-11T14:29:56.000Z | rotypes/Windows/Storage/Streams/__init__.py | Gliese129/ArknightsAutoHelper | 43971a63da55001ebc55a7e0de56e9364dff04bb | ["MIT"] | 1 | 2022-03-20T05:52:38.000Z | 2022-03-20T05:52:38.000Z |
from ctypes import c_uint32, c_void_p, string_at
from rotypes.idldsl import define_winrt_com_method, GUID
from rotypes.inspectable import IInspectable, IUnknown
@GUID('905a0fef-bc53-11df-8c49-001e4fc686da')
class IBufferByteAccess(IUnknown):
pass
@GUID('905A0FE0-BC53-11DF-8C49-001E4FC686DA')
class IBuffer(IInspectable):
def __len__(self):
return self.Length
def __bytes__(self):
byteaccess = self.astype(IBufferByteAccess)
ptr = byteaccess.Buffer()
return string_at(ptr, len(self))
define_winrt_com_method(IBufferByteAccess, 'Buffer', retval=c_void_p)
define_winrt_com_method(IBuffer, 'get_Capacity', propget=c_uint32)
define_winrt_com_method(IBuffer, 'get_Length', propget=c_uint32)
define_winrt_com_method(IBuffer, 'put_Length', propput=c_uint32)
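# Illustrative usage sketch (editor addition, not part of the original module): the
# __len__/__bytes__ hooks defined on IBuffer above let a caller copy a WinRT buffer
# into plain Python bytes. `buf` is assumed to be an IBuffer obtained from a WinRT call.
def buffer_to_bytes(buf: IBuffer) -> bytes:
    return bytes(buf)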
| 27.758621 | 69 | 0.773913 | 273 | 0.33913 | 0 | 0 | 365 | 0.453416 | 0 | 0 | 122 | 0.151553 |
40293f7dca9ef672564fb8730fe1d23ecd590f2b | 23,410 | py | Python | simple_playgrounds/playground.py | Asjidkalam/simple-playgrounds | 72ec42987a33175103191fa9722e0e002f889954 | ["MIT"] | null | null | null | simple_playgrounds/playground.py | Asjidkalam/simple-playgrounds | 72ec42987a33175103191fa9722e0e002f889954 | ["MIT"] | null | null | null | simple_playgrounds/playground.py | Asjidkalam/simple-playgrounds | 72ec42987a33175103191fa9722e0e002f889954 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
""" Playground documentation.
Module defining Playground Base Class
"""
import os
from abc import ABC
import yaml
import pymunk
from .utils import PositionAreaSampler
from .utils.definitions import SPACE_DAMPING, CollisionTypes, SceneElementTypes
# pylint: disable=unused-argument
# pylint: disable=line-too-long
class Playground(ABC):
""" Playground is a Base Class that manages the physical simulation.
Playground manages the interactions between Agents and Scene Elements.
Attributes:
size: size of the scene (width, length).
scene_elements: list of SceneElements present in the Playground.
fields: list of fields producing SceneElements in the Playground.
agents: list of Agents present in the Playground.
initial_agent_position: position or PositionAreaSampler,
Starting position of an agent (single agent).
done: bool, True if the playground reached termination.
"""
# pylint: disable=too-many-instance-attributes
scene_entities = []
def __init__(self, size):
# Generate Scene
self.size = size
self._width, self._length = self.size
# Initialization of the pymunk space, modelling all the physics
self.space = self._initialize_space()
# Public attributes for entities in the playground
self.scene_elements = []
self.fields = []
self.agents = []
# Private attributes for managing interactions in playground
self._disappeared_scene_elements = []
self._grasped_scene_elements = {}
self._teleported = []
# Add entities declared in the scene
for scene_entity in self.scene_entities:
self.add_scene_element(scene_entity)
self.done = False
self.initial_agent_position = None
self._handle_interactions()
self.time_limit = None
self.time_limit_reached_reward = None
self.time_test = 0
@staticmethod
def parse_configuration(key):
""" Private method that parses yaml configuration files.
Args:
key: (str) name of the playground configuration.
Returns:
Dictionary of attributes and default values.
"""
fname = 'utils/configs/playground.yml'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, fname), 'r') as yaml_file:
default_config = yaml.load(yaml_file, Loader=yaml.SafeLoader)
return default_config[key]
@staticmethod
def _initialize_space():
""" Method to initialize Pymunk empty space for 2D physics.
Returns: Pymunk Space
"""
space = pymunk.Space()
space.gravity = pymunk.Vec2d(0., 0.)
space.damping = SPACE_DAMPING
return space
def update(self, steps):
""" Update the Playground
Update all SceneElements, Fields, Timers and Grasps
Runs the Physics engine for n steps.
Args:
steps: Number of steps
"""
for agent in self.agents:
agent.pre_step()
for _ in range(steps):
self.space.step(1. / steps)
for elem in self.scene_elements:
elem.pre_step()
if elem.follows_waypoints:
self.space.reindex_shapes_for_body(elem.pm_body)
self._fields_produce()
self._check_timers()
self._release_grasps()
self._check_teleports()
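    # Illustrative call pattern (editor note, not in the original source): a driver
    # script would typically build a playground, add agents, and then call
    #     playground.update(steps)   # e.g. steps=10 physics sub-steps per frame
    # once per simulation frame, applying agent actions between calls.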
def reset(self):
""" Reset the Playground to its initial state.
"""
# remove entities and filter out entities which are temporary
for entity in self.scene_elements.copy():
self.remove_scene_element(entity)
# reset and replace entities that are not temporary
for entity in self._disappeared_scene_elements.copy():
entity.reset()
self.add_scene_element(entity)
# reset fields
for entity in self.fields:
entity.reset()
# reset agents
for agent in self.agents.copy():
agent.reset()
self.remove_agent(agent)
self.add_agent(agent)
self.done = False
def add_agent(self, new_agent, tries=100):
""" Method to add an Agent to the Playground.
If the Agent has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Args:
new_agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the agent
"""
# If already there
if new_agent in self.scene_elements:
raise ValueError('Agent already in Playground')
# Inform agent of the playground size
new_agent.size_playground = self.size
if new_agent.allow_overlapping:
self._add_agent(new_agent)
else:
success = self._add_agent_without_ovelapping(new_agent, tries = tries)
if not success:
raise ValueError("Agent couldn't be placed without overlapping")
def _add_agent(self, agent):
""" Add an agent to the playground.
Args:
agent: Agent.
"""
self.agents.append(agent)
if agent.initial_position is not None:
pass
elif self.initial_agent_position is not None:
agent.initial_position = self.initial_agent_position
else:
raise ValueError("""Agent initial position should be defined in the playground or passed as an argument)
to the class agent""")
agent.position = agent.initial_position
for body_part in agent.parts:
self.space.add(*body_part.pm_elements)
def _add_agent_without_ovelapping(self, agent, tries=100):
""" Method to add am Agent to the Playground without overlapping.
Useful when an Agent has a random initial position, to avoid overlapping.
Args:
agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
trial = 0
visible_collide_parts = True
interactive_collide_parts = True
all_shapes = self.space.shapes.copy()
while (interactive_collide_parts or visible_collide_parts) and trial < tries:
self._add_agent(agent)
visible_collide_parts = False
interactive_collide_parts = False
for part in agent.parts:
visible_collide = False
interactive_collide = False
if part.pm_visible_shape is not None:
collisions = [part.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if part.pm_interaction_shape is not None:
collisions = [part.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
visible_collide_parts = visible_collide or visible_collide_parts
interactive_collide_parts = interactive_collide or interactive_collide_parts
if visible_collide_parts or interactive_collide_parts:
self.remove_agent(agent)
trial += 1
if interactive_collide_parts or visible_collide_parts:
return False
return True
def _add_scene_element(self, new_scene_element, new_position):
""" Method to add a SceneElement to the Playground.
"""
if new_scene_element in self.scene_elements:
raise ValueError('Scene element already in Playground')
new_scene_element.size_playground = self.size
if new_position:
new_scene_element.position = new_scene_element.initial_position
self.space.add(*new_scene_element.pm_elements)
self.scene_elements.append(new_scene_element)
if new_scene_element in self._disappeared_scene_elements:
self._disappeared_scene_elements.remove(new_scene_element)
def _add_scene_element_without_ovelapping(self, scene_element, tries, new_position):
trial = 0
visible_collide = True
interactive_collide = True
all_shapes = self.space.shapes.copy()
while (visible_collide or interactive_collide) and trial < tries:
self._add_scene_element(scene_element, new_position)
visible_collide = False
interactive_collide = False
if scene_element.pm_visible_shape is not None:
collisions = [scene_element.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if scene_element.pm_interaction_shape is not None:
collisions = [scene_element.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
if visible_collide or interactive_collide:
self.remove_scene_element(scene_element)
trial += 1
if visible_collide or interactive_collide:
return False
return True
def add_scene_element(self, scene_element, tries=100, new_position=True):
""" Method to add a SceneElement to the Playground.
If the Element has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Useful when a SceneElement has a random initial position, to avoid overlapping.
Args:
scene_element: Scene Element to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
if scene_element.entity_type is SceneElementTypes.FIELD:
# If already there
if scene_element in self.fields:
raise ValueError('Field already in Playground')
self.fields.append(scene_element)
else:
if scene_element in self.scene_elements:
                raise ValueError('Scene element already in Playground')
# Else
scene_element.size_playground = self.size
if scene_element.allow_overlapping:
self._add_scene_element(scene_element, new_position)
else:
success = self._add_scene_element_without_ovelapping(scene_element, tries = tries, new_position=new_position)
if not success:
raise ValueError('Entity could not be placed without overlapping')
def _remove_agents(self):
for agent in self.agents:
self.remove_agent(agent)
def remove_agent(self, agent):
if agent not in self.agents:
return False
for part in agent.parts:
self.space.remove(*part.pm_elements)
part.velocity = [0, 0, 0]
part.grasped = []
agent.initial_position = None
self.agents.remove(agent)
return True
def remove_scene_element(self, scene_element):
if scene_element not in self.scene_elements:
return False
self.space.remove(*scene_element.pm_elements)
self.scene_elements.remove(scene_element)
if not scene_element.is_temporary_entity:
self._disappeared_scene_elements.append(scene_element)
for elem in self.scene_elements:
if elem.entity_type == 'dispenser' and scene_element in elem.produced_entities:
elem.produced_entities.remove(scene_element)
for field in self.fields:
if scene_element in field.produced_entities:
field.produced_entities.remove(scene_element)
if scene_element in self._grasped_scene_elements.keys():
body_part = self._grasped_scene_elements[scene_element]
self.space.remove(*body_part.grasped)
body_part.grasped = []
# self._grasped_scene_elements.pop(scene_element)
return True
def _fields_produce(self):
for field in self.fields:
if field.can_produce():
new_entity = field.produce()
self.add_scene_element(new_entity)
def _check_timers(self):
for entity in self.scene_elements:
if entity.timed and entity.timer == 0:
list_remove, list_add = entity.activate(self)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
def _release_grasps(self):
for agent in self.agents:
for part in agent.parts:
if not part.is_holding and part.can_grasp:
for joint in part.grasped:
self.space.remove(joint)
part.grasped = []
for element_grasped, part in self._grasped_scene_elements.copy().items():
if not part.grasped:
self._grasped_scene_elements.pop(element_grasped)
def _check_teleports(self):
for agent, teleport in self._teleported:
overlaps = self.agent_overlaps_with_element(agent, teleport)
if not overlaps:
self._teleported.remove((agent, teleport))
def agent_overlaps_with_element(self, agent, element):
overlaps = False
for part in agent.parts:
if element.pm_visible_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_visible_shape).points != []
if element.pm_interaction_shape is not None:
overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_interaction_shape).points != []
return overlaps
def get_scene_element_from_shape(self, pm_shape):
"""
Returns: Returns the Scene Element associated with the pymunk shape.
"""
entity = next(iter([e for e in self.scene_elements if pm_shape in e.pm_elements]), None)
return entity
def get_agent_from_shape(self, pm_shape):
"""
Returns: Returns the Agent associated with the pymunk shape.
"""
for agent in self.agents:
if agent.owns_shape(pm_shape):
return agent
return None
def get_entity_from_shape(self, pm_shape):
"""
Returns the element associated with the pymunk shape
Args:
            pm_shape: Pymunk shape
Returns:
Single entitiy or None
"""
scene_element = self.get_scene_element_from_shape(pm_shape)
if scene_element is not None: return scene_element
for agent in self.agents:
part = agent.get_bodypart_from_shape(pm_shape)
if part is not None: return part
return None
def _get_closest_agent(self, ent):
dist_list = [(a.position[0] - ent.position[0])**2 + (a.position[1] - ent.position[1])**2 for a in self.agents]
index_min_dist = dist_list.index(min(dist_list))
closest_agent = self.agents[index_min_dist]
return closest_agent
def _agent_touches_entity(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
touched_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if touched_entity is None: return True
agent.reward += touched_entity.reward
list_remove, list_add = touched_entity.activate()
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if touched_entity.terminate_upon_contact:
self.done = True
return True
def _agent_interacts(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_activating:
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(body_part)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
body_part.is_activating = False
return True
def _agent_grasps(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None: return True
if body_part.is_grasping and not body_part.is_holding:
body_part.is_holding = True
j_1 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, 5), (0, 0))
j_2 = pymunk.PinJoint(body_part.pm_body, interacting_entity.pm_body, (0, -5), (0, 0))
motor = pymunk.SimpleMotor(body_part.pm_body, interacting_entity.pm_body, 0)
self.space.add(j_1, j_2, motor) # , j_3, j_4, j_5, j_6, j_7, j_8)
body_part.grasped = [j_1, j_2, motor] # , j_3, j_4, j_5, j_6, j_7, j_8]
self._grasped_scene_elements[interacting_entity] = body_part
return True
def _agent_enters_zone(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
zone_reached = self.get_scene_element_from_shape(arbiter.shapes[1])
if zone_reached is None: return True
agent.reward += zone_reached.reward
if zone_reached.terminate_upon_contact:
self.done = True
return True
def _gem_interacts(self, arbiter, space, data):
gem = self.get_scene_element_from_shape(arbiter.shapes[0])
interacting_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if interacting_entity is None or gem is None: return True
agent = self._get_closest_agent(gem)
agent.reward += interacting_entity.reward
list_remove, list_add = interacting_entity.activate(gem)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
if interacting_entity.terminate_upon_contact:
self.done = True
return True
def _agent_eats(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
body_part = agent.get_bodypart_from_shape(arbiter.shapes[0])
edible_entity = self.get_scene_element_from_shape(arbiter.shapes[1])
if edible_entity is None: return True
if body_part.is_eating:
agent.reward += edible_entity.get_reward()
self.remove_scene_element(edible_entity)
completely_eaten = edible_entity.eats()
if not completely_eaten:
self.add_scene_element(edible_entity, new_position=False)
body_part.is_eating = False
return True
def _agent_teleports(self, arbiter, space, data):
agent = self.get_agent_from_shape(arbiter.shapes[0])
teleport = self.get_scene_element_from_shape(arbiter.shapes[1])
if teleport is None or teleport.target is None or (agent, teleport) in self._teleported:
return True
if teleport.target.traversable:
agent.position = (teleport.target.position[0], teleport.target.position[1],
agent.position[2])
else:
area_shape = teleport.target.physical_shape
if area_shape == 'rectangle':
width = teleport.target.width + agent.base_platform.radius * 2 + 1
length = teleport.target.length + agent.base_platform.radius * 2 + 1
angle = teleport.target.position[-1]
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape=area_shape,
angle=angle,
width_length=[width+2, length+2],
excl_width_length=[width, length],
)
else:
radius = teleport.target.radius + agent.base_platform.radius + 1
sampler = PositionAreaSampler(
center=[teleport.target.position[0], teleport.target.position[1]],
area_shape='circle',
radius=radius,
excl_radius=radius,
)
agent.position = sampler.sample()
if (agent, teleport) not in self._teleported:
self._teleported.append((agent, teleport.target))
return True
def _handle_interactions(self):
# Order is important
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.GRASPABLE, self._agent_grasps)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.CONTACT, self._agent_touches_entity)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.EDIBLE, self._agent_eats)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.INTERACTIVE, self._agent_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.PASSIVE, self._agent_enters_zone)
self.add_interaction(CollisionTypes.GEM, CollisionTypes.ACTIVATED_BY_GEM, self._gem_interacts)
self.add_interaction(CollisionTypes.AGENT, CollisionTypes.TELEPORT, self._agent_teleports)
def add_interaction(self, collision_type_1, collision_type_2, interaction_function):
"""
Args:
collision_type_1: collision type of the first entity
collision_type_2: collision type of the second entity
interaction_function: function that handles the interaction
Returns: None
"""
handler = self.space.add_collision_handler(collision_type_1, collision_type_2)
handler.pre_solve = interaction_function
class PlaygroundRegister:
"""
Class to register Playgrounds.
"""
playgrounds = {}
@classmethod
def register(cls, playground_name):
"""
Registers a playground
"""
def decorator(subclass):
if playground_name in cls.playgrounds:
raise ValueError(playground_name+' already registered')
cls.playgrounds[playground_name] = subclass
return subclass
return decorator
@classmethod
def filter(cls, name):
return [pg for name_pg, pg in cls.playgrounds.items() if name in name_pg]
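# Illustrative sketch (editor addition, not part of the original module): a minimal
# custom playground registered under a hypothetical name. The 'demo_room' key, the
# 200x200 size and the starting position are placeholders; real playgrounds would
# also add scene elements in __init__.
@PlaygroundRegister.register('demo_room')
class DemoRoom(Playground):
    def __init__(self):
        super().__init__(size=(200, 200))
        self.initial_agent_position = (100, 100, 0)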
| 31.893733 | 125 | 0.641948 | 23,060 | 0.985049 | 0 | 0 | 1,397 | 0.059675 | 0 | 0 | 4,363 | 0.186373 |
402b9f4345d8a408ad36e88d31b1b6668765cd8b | 2,679 | py | Python | UEManifestReader/classes/FManifestData.py | ryryburge/UEManifestReader | 970b24dd80fc6b5d599d1bd77de78a1b19f4432e | ["MIT"] | null | null | null | UEManifestReader/classes/FManifestData.py | ryryburge/UEManifestReader | 970b24dd80fc6b5d599d1bd77de78a1b19f4432e | ["MIT"] | null | null | null | UEManifestReader/classes/FManifestData.py | ryryburge/UEManifestReader | 970b24dd80fc6b5d599d1bd77de78a1b19f4432e | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import zlib
from UEManifestReader.enums import *
from UEManifestReader.classes.FCustomFields import FCustomFields
from UEManifestReader.classes.FManifestMeta import FManifestMeta
from UEManifestReader.classes.FChunkDataList import FChunkDataList
from UEManifestReader.classes.FManifestHeader import FManifestHeader
from UEManifestReader.classes.stream_reader import ConstBitStreamWrapper
from UEManifestReader.classes.FFileManifestList import FFileManifestList
# FManifestData - The public interface for loading/saving manifest files.
class FManifestData():
def __init__(self, data: bytes):
self.reader = ConstBitStreamWrapper(data)
self.start()
def start(self):
StartPos = self.reader.bytepos
# Read the Manifest Header
self.Header = FManifestHeader(self.reader)
# If we are loading an old format, defer to the old code!
if (self.Header.Version.value < EFeatureLevel.StoredAsBinaryData.value):
            FullDataSize = self.GetFullDataSize()
            FullData = self.reader.read_bytes(FullDataSize)
self.reader.bytepos = StartPos
temp = FManifestData(self.reader.read_bytes(FullDataSize))
self.Meta = temp.Meta
self.ChunkDataList = temp.ChunkDataList
self.FileManifestList = temp.FileManifestList
self.CustomFields = temp.CustomFields
return
else:
# Compression format selection - we only have one right now.
# Fill the array with loaded data.
# DataSizeCompressed always equals the size of the data following the header.
if self.Header.StoredAs == EManifestStorageFlags.Compressed.value:
Decompressed = zlib.decompress(self.reader.read_bytes(self.Header.DataSizeCompressed))
ManifestRawData = ConstBitStreamWrapper(Decompressed)
elif self.Header.StoredAs == EManifestStorageFlags.Encrypted.value:
raise Exception('Encrypted Manifests are not supported yet')
# Read the Manifest Meta
self.Meta = FManifestMeta(ManifestRawData)
# Read the Manifest Chunk List
self.ChunkDataList = FChunkDataList(ManifestRawData)
# Read the Manifest File List
self.FileManifestList = FFileManifestList(ManifestRawData)
# Read the Custom Fields
self.CustomFields = FCustomFields(ManifestRawData)
def GetFullDataSize(self) -> int:
bIsCompressed = self.Header.StoredAs == EManifestStorageFlags.Compressed
        return self.Header.HeaderSize + (self.Header.DataSizeCompressed if bIsCompressed else self.Header.DataSizeUncompressed)
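# Illustrative usage sketch (editor addition, not part of the original module): parse a
# manifest file from disk with the class above; the path argument is a placeholder.
def load_manifest(path: str) -> FManifestData:
    with open(path, 'rb') as f:
        return FManifestData(f.read())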
| 46.189655 | 117 | 0.711459 | 2,121 | 0.791713 | 0 | 0 | 0 | 0 | 0 | 0 | 496 | 0.185144 |
402ba89b6c4bbf8923f29b3e69bf5634d07e5b15 | 98 | py | Python | Python/module.py | minjibyeongho/KOSA-Pytorch | 80d71a8c579d645bea4c3352c9babdf232a8630e | ["MIT"] | 2 | 2021-05-25T08:52:07.000Z | 2021-08-13T23:49:42.000Z | Python/module.py | minjibyeongho/KOSA-Pytorch | 80d71a8c579d645bea4c3352c9babdf232a8630e | ["MIT"] | null | null | null | Python/module.py | minjibyeongho/KOSA-Pytorch | 80d71a8c579d645bea4c3352c9babdf232a8630e | ["MIT"] | 2 | 2021-05-24T00:49:45.000Z | 2021-06-11T01:30:12.000Z |
#module.py
def hello():
print("Hello!")
#if __name__=="__main__":
#    print(__name__)
| 14 | 26 | 0.581633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.663265 |
402c6d1527bb64bf420904254134ab7105236ec8 | 10,690 | py | Python | data_utils.py | algoprog/Quin | c1fd3b8e5e2163217f6c8062620ee0c1dfeed0e8 | ["MIT"] | 47 | 2020-08-02T12:28:07.000Z | 2022-03-30T01:56:57.000Z | data_utils.py | algoprog/Quin | c1fd3b8e5e2163217f6c8062620ee0c1dfeed0e8 | ["MIT"] | 4 | 2020-09-20T17:31:51.000Z | 2021-12-02T17:40:03.000Z | data_utils.py | algoprog/Quin | c1fd3b8e5e2163217f6c8062620ee0c1dfeed0e8 | ["MIT"] | 4 | 2020-11-23T15:47:34.000Z | 2021-03-30T02:02:02.000Z |
import csv
import json
import pickle
import logging
import re
import pandas
import gzip
import os
import numpy as np
from random import randint, random
from tqdm import tqdm
from retriever.dense_retriever import DenseRetriever
from models.tokenization import tokenize
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str, texts: List[str], label: Union[int, float]):
"""
Creates one InputExample with the given texts, guid and label
str.strip() is called on both texts.
:param guid
id for the example
:param texts
the texts for the example
:param label
the label for the example
"""
self.guid = guid
self.texts = [text.strip() for text in texts]
self.label = label
def get_texts(self):
return self.texts
def get_label(self):
return self.label
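def _demo_input_example() -> InputExample:
    """Illustrative helper (editor addition, not in the original file): wraps a single
    (query, passage) pair; the guid, texts and label are placeholders."""
    return InputExample(guid='demo-0',
                        texts=['what is dense retrieval?',
                               'dense retrieval encodes queries and passages into vectors.'],
                        label=1)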
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def get_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
guid = "%s-%d" % (filename, id)
id += 1
if label == 'entailment':
label = 0
elif label == 'contradiction':
label = 1
else:
label = 2
examples.append(InputExample(guid=guid,
texts=[sample['s1'], sample['s2']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def get_qa_examples(filename, max_examples=0, dev=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['relevant']
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if not dev:
if label == 1:
for _ in range(13):
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def map_label(label):
labels = {"relevant": 0, "irrelevant": 1}
return labels[label.strip().lower()]
def get_qar_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_qar_artificial_examples():
examples = []
id = 0
print('Loading passages...')
passages = []
file = open('data/msmarco/collection.tsv', 'r', encoding='utf8')
while True:
line = file.readline()
if not line:
break
line = line.rstrip('\n').split('\t')
passages.append(line[1])
print('Loaded passages')
with open('data/qar/qar_artificial_queries.csv') as f:
for i, line in enumerate(f):
queries = line.rstrip('\n').split('|')
for query in queries:
guid = "%s-%d" % ('', id)
id += 1
examples.append(InputExample(guid=guid,
texts=[query, passages[i]],
label=1.0))
return examples
def get_single_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['text']],
label=1))
if 0 < max_examples <= len(examples):
break
return examples
def get_qnli_examples(filename, max_examples=0, no_contradictions=False, fever_only=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
if label == 'contradiction' and no_contradictions:
continue
if sample['evidence'] == '':
continue
if fever_only and sample['source'] != 'fever':
continue
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['statement'].strip(), sample['evidence'].strip()],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_retrieval_examples(filename, negative_corpus='data/msmarco/collection.tsv', max_examples=0, no_statements=True,
encoder_model=None, negative_samples_num=4):
examples = []
queries = []
passages = []
negative_passages = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
if 'evidence' in sample and sample['evidence'] == '':
continue
guid = "%s-%d" % (filename, id)
id += 1
if sample['type'] == 'question':
query = sample['question']
passage = sample['answer']
else:
query = sample['statement']
passage = sample['evidence']
query = query.strip()
passage = passage.strip()
if sample['type'] == 'statement' and no_statements:
continue
queries.append(query)
passages.append(passage)
if sample['source'] == 'natural-questions':
negative_passages.append(passage)
if max_examples == len(passages):
break
if encoder_model is not None:
# Load MSMARCO passages
logging.info('Loading MSM passages...')
with open(negative_corpus) as file:
for line in file:
p = line.rstrip('\n').split('\t')[1]
negative_passages.append(p)
logging.info('Building ANN index...')
dense_retriever = DenseRetriever(model=encoder_model, batch_size=1024, use_gpu=True)
dense_retriever.create_index_from_documents(negative_passages)
results = dense_retriever.search(queries=queries, limit=100, probes=256)
negative_samples = [
[negative_passages[p[0]] for p in r if negative_passages[p[0]] != passages[i]][:negative_samples_num]
for i, r in enumerate(results)
]
# print(queries[0])
# print(negative_samples[0][0])
for i in range(len(queries)):
texts = [queries[i], passages[i]] + negative_samples[i]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
else:
for i in range(len(queries)):
texts = [queries[i], passages[i]]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
return examples
def get_pair_input(tokenizer, sent1, sent2, max_len=256):
text = "[CLS] {} [SEP] {} [SEP]".format(sent1, sent2)
tokenized_text = tokenizer.tokenize(text)[:max_len]
indexed_tokens = tokenizer.encode(text)[:max_len]
segments_ids = []
sep_flag = False
for i in range(len(tokenized_text)):
if tokenized_text[i] == '[SEP]' and not sep_flag:
segments_ids.append(0)
sep_flag = True
elif sep_flag:
segments_ids.append(1)
else:
segments_ids.append(0)
return indexed_tokens, segments_ids
def build_batch(tokenizer, text_list, max_len=256):
token_id_list = []
segment_list = []
attention_masks = []
longest = -1
for pair in text_list:
sent1, sent2 = pair
ids, segs = get_pair_input(tokenizer, sent1, sent2, max_len=max_len)
if ids is None or segs is None:
continue
token_id_list.append(ids)
segment_list.append(segs)
attention_masks.append([1] * len(ids))
if len(ids) > longest:
longest = len(ids)
if len(token_id_list) == 0:
return None, None, None
# padding
assert (len(token_id_list) == len(segment_list))
for ii in range(len(token_id_list)):
token_id_list[ii] += [0] * (longest - len(token_id_list[ii]))
attention_masks[ii] += [1] * (longest - len(attention_masks[ii]))
segment_list[ii] += [1] * (longest - len(segment_list[ii]))
return token_id_list, segment_list, attention_masks
def load_unsupervised_dataset(dataset_file):
print('Loading dataset...')
x = pickle.load(open(dataset_file, "rb"))
print('Done')
return x, len(x[0])
def load_supervised_dataset(dataset_file):
print('Loading dataset...')
d = pickle.load(open(dataset_file, "rb"))
print('Done')
return d[0], d[1]
| 31.627219 | 119 | 0.528718 | 1,075 | 0.100561 | 0 | 0 | 0 | 0 | 0 | 0 | 1,207 | 0.112909 |
402d9bbc776d0b10c128c8af7e8de8955e864e57 | 327 | py | Python | hc/accounts/migrations/0025_remove_member_team.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | ["BSD-3-Clause"] | null | null | null | hc/accounts/migrations/0025_remove_member_team.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | ["BSD-3-Clause"] | 1 | 2021-06-10T23:14:00.000Z | 2021-06-10T23:14:00.000Z | hc/accounts/migrations/0025_remove_member_team.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | ["BSD-3-Clause"] | null | null | null |
# Generated by Django 2.1.5 on 2019-01-22 08:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0024_auto_20190119_1540'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='team',
),
]
| 18.166667 | 48 | 0.590214 | 242 | 0.740061 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.293578 |
402da872d024b72e61193b2048d5c5fe8a54f2e6 | 6,671 | py | Python | openstack-dashboard/openstack_dashboard/api/proxy.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | openstack-dashboard/openstack_dashboard/api/proxy.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | null | null | null | openstack-dashboard/openstack_dashboard/api/proxy.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | null | null | null |
from django.conf import settings
#from proxyclient.v2 import client as proxy_client
from openstack_dashboard.utils import proxy_client
def proxyclient(request):
management_url = getattr(settings, 'MANAGEMENT_URL')
conn = proxy_client.Client(request.user.username,
request.user.token.id,
user_id=request.user.id,
project_id=request.user.project_id,
insecure=False,
cacert=None,
http_log_debug=settings.DEBUG)
conn.client.auth_token = request.user.token.id
conn.client.set_management_url(management_url)
return conn
def authenticate(request, username, password, **kwargs):
return proxyclient(request).users.authenticate(username, password, **kwargs)
def authenticate_by_zone(request, zone_id):
return proxyclient(request).users.authenticate_by_zone(request.user.id, zone_id)
def user_list(request):
return proxyclient(request).users.list()
def user_get(request):
return proxyclient(request).users.get(request.user.id)
def user_delete(request, user_id):
return proxyclient(request).users.delete(user_id)
def user_login_list(request, user_id=None):
return proxyclient(request).users.login_list(user_id=user_id)
def availability_zone_list(request, detail=False):
return proxyclient(request).zones.list(detail=detail)
def availability_zone_get(request, id):
return proxyclient(request).zones.get(id)
def zone_create(request, id=None, name=None, auth_url=None,
auth_token=None, default_instances=None):
return proxyclient(request).zones.create(
id=id, name=name, auth_url=auth_url, auth_token=auth_token,
default_instances=default_instances)
def zone_delete(request, zone_id):
proxyclient(request).zones.delete(zone_id)
#
#def logout(request):
# _proxy(request).logout(request.user.id)
def server_list(request, all_tenants=False):
return proxyclient(request).servers.list(all_tenants=all_tenants)
def server_get(request, instance_id):
return proxyclient(request).servers.get(instance_id)
def server_create(request, name, image, flavor, zone_id=None,
key_name=None, user_data=None, security_groups=None,
block_device_mapping=None, block_device_mapping_v2=None, nics=None,
availability_zone=None, instance_count=1, admin_pass=None,
disk_config=None, accessIPv4=None, gateway=None, net_type=None): #cg
return proxyclient(request).servers.create(
name, image, flavor, zone_id=zone_id,
user_data=user_data, security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
nics=nics, availability_zone=availability_zone,
instance_count=instance_count, admin_pass=admin_pass,
disk_config=disk_config, accessIPv4=accessIPv4,
gateway=gateway, netype=net_type)
def server_delete(request, instance_id):
proxyclient(request).servers.delete(instance_id)
def server_start(request, instance_id):
proxyclient(request).servers.start(instance_id)
def server_stop(request, instance_id):
proxyclient(request).servers.stop(instance_id)
def image_list_detailed(request, zone_id, filters=None):
return image_get(request, zone_id, filters=filters), False
def image_get(request, zone, filters=None):
return proxyclient(request).images.get(zone, filters=filters)
def image_delete(request, image_id):
proxyclient(request).images.delete(image_id)
def image_rebuild(request, zone):
return proxyclient(request).images.rebuild(zone)
def flavor_list(request, zone):
return proxyclient(request).flavors.get(zone)
def flavor_get_by_zone(request, zone):
return proxyclient(request).flavors.get(zone)
def flavor_delete(request, flavor_id):
proxyclient(request).flavors.delete(flavor_id)
def flavor_rebuild(request, zone):
return proxyclient(request).flavors.rebuild(zone)
def gateway_list(request):
return proxyclient(request).gateways.list()
def gateway_get(request, instance_id):
return proxyclient(request).gateways.get_by_instance(instance_id)
def gateway_get_by_zone(request, zone):
return proxyclient(request).gateways.get_by_zone(zone)
def gateway_delete(request, gateway_id):
proxyclient(request).gateways.delete(gateway_id)
def gateway_rebuild(request, zone):
return proxyclient(request).gateways.rebuild(zone)
def network_get_by_zone(request, zone):
return proxyclient(request).networks.get(zone)
def network_delete(request, network_id):
proxyclient(request).networks.delete(network_id)
def network_rebuild(request, zone):
return proxyclient(request).networks.rebuild(zone)
def network_type_list(request):
return proxyclient(request).networks.network_type_list()
def network_type_delete(request, id):
proxyclient(request).networks.network_type_delete(id)
def security_group_list(request):
return proxyclient(request).security_groups.list()
def security_group_update(request, **kwargs):
proxyclient(request).security_groups.update(**kwargs)
def firewall_list(request):
return proxyclient(request).firewalls.list()
def firewall_get(request, id):
return proxyclient(request).firewalls.get(id)
def firewall_create(request, instance_id, hostname, gateway_port,
service_port):
return proxyclient(request).firewalls.create(
instance_id=instance_id, hostname=hostname,
gateway_port=gateway_port, service_port=service_port)
def firewall_exist(request, instance_id, hostname=None, gateway_port=None):
return proxyclient(request).firewalls.exists(
instance_id, hostname=hostname, gateway_port=gateway_port)
def firewall_delete(request, firewall_id):
proxyclient(request).firewalls.delete(firewall_id)
#
def project_absolute_limits(request, zone_id):
return proxyclient(request).users.user_absolute_limits(zone_id)
def user_absolute_limits(request):
return proxyclient(request).users.user_absolute_limits()
def resource_list(request, user_id=None):
return proxyclient(request).resources.list(
user_id=user_id or request.user.id)
def resource_get(request, user_id=None, source_name=None, source_id=None):
filters = {'source_id': source_id, 'source_name': source_name}
return proxyclient(request).resources.get(
user_id or request.user.id, filters=filters)
def get_monitor(request, instance):
return proxyclient(request).servers.monitor(instance)
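# Illustrative usage sketch (editor addition, not part of the original module): a minimal
# Django view built on the wrappers above; the template name is a placeholder.
def instances_view(request):
    from django.shortcuts import render
    instances = server_list(request, all_tenants=False)
    return render(request, 'instances.html', {'instances': instances})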
| 35.296296 | 86 | 0.746665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.023984 |
402e10f9fc439179bd0a8ffc8b3cd706de061b93 | 251 | py | Python | tfl_data.py | dongyan1024/overtime | 4f722a823585890026fe9584ba5985963b2a586c | ["MIT"] | 9 | 2020-10-15T13:53:36.000Z | 2022-03-08T12:08:09.000Z | tfl_data.py | dongyan1024/overtime | 4f722a823585890026fe9584ba5985963b2a586c | ["MIT"] | 6 | 2021-02-07T15:43:12.000Z | 2021-04-24T04:03:39.000Z | tfl_data.py | dongyan1024/overtime | 4f722a823585890026fe9584ba5985963b2a586c | ["MIT"] | 7 | 2020-10-15T13:55:12.000Z | 2022-03-12T03:54:02.000Z |
import overtime as ot
times = ['14:00','14:05', '14:10', '14:15', '14:20', '14:25', '14:30', '14:35', '14:40', '14:45', '14:50', '14:55']
tfl_data = ot.TflInput(['victoria', 'central', 'bakerloo', 'piccadilly'], ['inbound', 'outbound'], times)
| 41.833333 | 116 | 0.565737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.573705 |
402eafa1a88db63bd7cacd91e03e8377d8b8d5d8 | 2,375 | py | Python | apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py | opendatacube/odc-tools | 42950e93305846b640a1c6135c9da16ba76c1b3a | ["Apache-2.0"] | 29 | 2019-09-18T10:21:07.000Z | 2022-03-10T07:46:57.000Z | apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py | opendatacube/odc-tools | 42950e93305846b640a1c6135c9da16ba76c1b3a | ["Apache-2.0"] | 259 | 2019-12-11T03:19:01.000Z | 2022-03-31T22:46:11.000Z | apps/dc_tools/odc/apps/dc_tools/fs_to_dc.py | opendatacube/odc-tools | 42950e93305846b640a1c6135c9da16ba76c1b3a | ["Apache-2.0"] | 18 | 2020-01-22T14:50:27.000Z | 2022-03-01T14:48:12.000Z |
import json
from pathlib import Path
import click
import datacube
from datacube.index.hl import Doc2Dataset
from odc.apps.dc_tools.utils import (
index_update_dataset,
update_if_exists,
allow_unsafe,
transform_stac,
)
from ._stac import stac_transform
from typing import Generator, Optional
import logging
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
logging.basicConfig(
level=logging.WARNING,
format="%(asctime)s: %(levelname)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S",
)
def _find_files(
path: str, glob: Optional[str] = None, stac: Optional[bool] = False
) -> Generator[Path, None, None]:
if glob is None:
glob = "**/*.json" if stac else "**/*.yaml"
return Path(path).glob(glob)
@click.command("fs-to-dc")
@click.argument("input_directory", type=str, nargs=1)
@update_if_exists
@allow_unsafe
@transform_stac
@click.option(
"--glob",
default=None,
help="File system glob to use, defaults to **/*.yaml or **/*.json for STAC.",
)
def cli(input_directory, update_if_exists, allow_unsafe, stac, glob):
dc = datacube.Datacube()
doc2ds = Doc2Dataset(dc.index)
if glob is None:
glob = "**/*.json" if stac else "**/*.yaml"
files_to_process = _find_files(input_directory, glob, stac=stac)
added, failed = 0, 0
for in_file in files_to_process:
with in_file.open() as f:
try:
                if in_file.suffix in (".yml", ".yaml"):
                    metadata = yaml.load(f, Loader=Loader)
else:
metadata = json.load(f)
# Do the STAC Transform if it's flagged
if stac:
metadata = stac_transform(metadata)
index_update_dataset(
metadata,
in_file.absolute().as_uri(),
dc=dc,
doc2ds=doc2ds,
update_if_exists=update_if_exists,
allow_unsafe=allow_unsafe,
)
added += 1
except Exception as e:
logging.exception(f"Failed to add dataset {in_file} with error {e}")
failed += 1
logging.info(f"Added {added} and failed {failed} datasets.")
if __name__ == "__main__":
cli()
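# Illustrative invocation (editor addition). The --glob option is defined above; the
# other flag names are assumptions about the option decorators imported from utils:
#   fs-to-dc ./yaml-documents --glob "**/*.odc-metadata.yaml" --update-if-exists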
| 26.388889 | 84 | 0.596211 | 0 | 0 | 0 | 0 | 1,534 | 0.645895 | 0 | 0 | 367 | 0.154526 |
4030d959e7cf60e57a2223602eae1667433715a2 | 651 | py | Python | scripts/fullizer.py | stijm/jazzjackrabbit2 | e47f1c42fd7c450c2e12bcb7dcaae0f695a0dc12 | ["MIT"] | 5 | 2021-08-03T20:02:00.000Z | 2021-11-19T20:29:36.000Z | scripts/fullizer.py | stijm/jj2 | e47f1c42fd7c450c2e12bcb7dcaae0f695a0dc12 | ["MIT"] | null | null | null | scripts/fullizer.py | stijm/jj2 | e47f1c42fd7c450c2e12bcb7dcaae0f695a0dc12 | ["MIT"] | null | null | null |
"""
WARNING:
Using this script outside any server except one with IP 127.0.0.1 means risking getting
an instant and permanent ban, anywhere you use it.
The script was created *ONLY FOR LOCAL* testing purposes.
NEVER, NEVER, *NEVER* run it in an online multiplayer server.
At least unless you're a dumb freak.
"""
import multiprocessing
import time
from scripts import play
if __name__ == '__main__':
for i in range(1, 33):
process = multiprocessing.Process(
target=play,
kwargs=dict(nick=f'Player {i}', connect=['127.0.0.1'], new_sgip=False),
)
process.start()
time.sleep(0.09)
| 26.04 | 91 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.545315 |
403251bad5543a2ea9b5b81f85773876a2b6f3ba | 1,458 | py | Python | setup.py | pranithk/gluster-georep-tools | 3c8c7dcf63042613b002385edcead7c1ec079e61 | ["MIT"] | null | null | null | setup.py | pranithk/gluster-georep-tools | 3c8c7dcf63042613b002385edcead7c1ec079e61 | ["MIT"] | null | null | null | setup.py | pranithk/gluster-georep-tools | 3c8c7dcf63042613b002385edcead7c1ec079e61 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
gluster-georep-tools.setup.py
:copyright: (c) 2016 by Aravinda VK
:license: MIT, see LICENSE for more details.
"""
from setuptools import setup
setup(
name="gluster-georep-tools",
version="0.2",
packages=["gluster_georep_tools",
"gluster_georep_tools.status",
"gluster_georep_tools.setup"],
include_package_data=True,
install_requires=['argparse', 'paramiko', 'glustercli'],
entry_points={
"console_scripts": [
"gluster-georep-setup = gluster_georep_tools.setup.cli:main",
"gluster-georep-status = gluster_georep_tools.status.cli:main",
]
},
platforms="linux",
zip_safe=False,
author="Aravinda VK",
author_email="[email protected]",
description="Gluster Geo-replication tools",
license="MIT",
keywords="gluster, tool, geo-replication",
url="https://github.com/aravindavk/gluster-georep-tools",
long_description="""
Gluster Geo-replication Tools
""",
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only"
],
)
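# Illustrative note (editor addition): after `pip install .`, the entry_points above
# expose two console commands; the argument shapes below are assumptions, not docs:
#   gluster-georep-setup <primary-volume> <secondary-host>::<secondary-volume>
#   gluster-georep-status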
| 30.375 | 75 | 0.61454 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 943 | 0.646776 |
403346598a2baf176ef8cdcf1186f9c5ce45137d | 14,184 | py | Python | docs/_downloads/dbc5873471dad3c21022112121cbd008/tensorboard_profiler_tutorial.py | woojinsong/PyTorch-tutorials-kr | 36fefd556f45c2b1f5db912793172c0369430fd4 | ["BSD-3-Clause"] | 221 | 2018-04-06T01:42:58.000Z | 2021-11-28T10:12:45.000Z | intermediate_source/tensorboard_profiler_tutorial.py | konlidoo/tutorials | 75b1c673a73ca285a16f52a62fc8ffcc6d069936 | ["BSD-3-Clause"] | 280 | 2018-05-25T08:53:21.000Z | 2021-12-02T05:37:25.000Z | intermediate_source/tensorboard_profiler_tutorial.py | konlidoo/tutorials | 75b1c673a73ca285a16f52a62fc8ffcc6d069936 | ["BSD-3-Clause"] | 181 | 2018-05-25T02:00:28.000Z | 2021-11-19T11:56:39.000Z |
"""
PyTorch Profiler With TensorBoard
====================================
This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler
to detect performance bottlenecks of the model.
Introduction
------------
PyTorch 1.8 includes an updated profiler API capable of
recording the CPU side operations as well as the CUDA kernel launches on the GPU side.
The profiler can visualize this information
in TensorBoard Plugin and provide analysis of the performance bottlenecks.
In this tutorial, we will use a simple Resnet model to demonstrate how to
use TensorBoard plugin to analyze model performance.
Setup
-----
To install ``torch`` and ``torchvision`` use the following command:
::
pip install torch torchvision
"""
######################################################################
# Steps
# -----
#
# 1. Prepare the data and model
# 2. Use profiler to record execution events
# 3. Run the profiler
# 4. Use TensorBoard to view results and analyze model performance
# 5. Improve performance with the help of profiler
# 6. Analyze performance with other advanced features
#
# 1. Prepare the data and model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# First, import all necessary libraries:
#
import torch
import torch.nn
import torch.optim
import torch.profiler
import torch.utils.data
import torchvision.datasets
import torchvision.models
import torchvision.transforms as T
######################################################################
# Then prepare the input data. For this tutorial, we use the CIFAR10 dataset.
# Transform it to the desired format and use DataLoader to load each batch.
transform = T.Compose(
[T.Resize(224),
T.ToTensor(),
T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
######################################################################
# Next, create Resnet model, loss function, and optimizer objects.
# To run on GPU, move model and loss to GPU device.
device = torch.device("cuda:0")
model = torchvision.models.resnet18(pretrained=True).cuda(device)
criterion = torch.nn.CrossEntropyLoss().cuda(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
model.train()
######################################################################
# Define the training step for each batch of input data.
def train(data):
inputs, labels = data[0].to(device=device), data[1].to(device=device)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
######################################################################
# 2. Use profiler to record execution events
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The profiler is enabled through the context manager and accepts several parameters,
# some of the most useful are:
#
# - ``schedule`` - callable that takes step (int) as a single parameter
# and returns the profiler action to perform at each step.
#
# In this example with ``wait=1, warmup=1, active=3, repeat=2``,
# profiler will skip the first step/iteration,
# start warming up on the second,
# record the following three iterations,
# after which the trace will become available and on_trace_ready (when set) is called.
# In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin.
#
# During ``wait`` steps, the profiler is disabled.
# During ``warmup`` steps, the profiler starts tracing but the results are discarded.
# This is for reducing the profiling overhead.
# The overhead at the beginning of profiling is high and can easily skew the profiling result.
# During ``active`` steps, the profiler works and records events.
# - ``on_trace_ready`` - callable that is called at the end of each cycle;
# In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard.
#   After profiling, the result files will be saved into the ``./log/resnet18`` directory.
#   Specify this directory as the ``logdir`` parameter to analyze the profile in TensorBoard.
# - ``record_shapes`` - whether to record shapes of the operator inputs.
# - ``profile_memory`` - Track tensor memory allocation/deallocation.
# - ``with_stack`` - Record source information (file and line number) for the ops.
# If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_),
# clicking a stack frame will navigate to the specific code line.
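#
# As a concrete walk-through of the ``schedule`` described above (``wait=1,
# warmup=1, active=3, repeat=2``; the step numbering here is our own
# illustration, not part of the API), the per-step profiler actions are:
#
# ::
#
#     step 0      -> skipped (wait)
#     step 1      -> warmup (traced, but the results are discarded)
#     steps 2-4   -> recorded (active); the first trace is handled after step 4
#     step 5      -> skipped (wait, second cycle)
#     step 6      -> warmup
#     steps 7-9   -> recorded; the second trace is handled after step 9
#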
with torch.profiler.profile(
schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
record_shapes=True,
with_stack=True
) as prof:
for step, batch_data in enumerate(train_loader):
if step >= (1 + 1 + 3) * 2:
break
train(batch_data)
        prof.step()  # Need to call this at the end of each step to notify the profiler of the step boundary.
######################################################################
# 3. Run the profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Run the above code. The profiling result will be saved under ``./log/resnet18`` directory.
######################################################################
# 4. Use TensorBoard to view results and analyze model performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Install PyTorch Profiler TensorBoard Plugin.
#
# ::
#
# pip install torch_tb_profiler
#
######################################################################
# Launch TensorBoard.
#
# ::
#
# tensorboard --logdir=./log
#
######################################################################
# Open the TensorBoard profile URL in the Google Chrome or Microsoft Edge browser.
#
# ::
#
# http://localhost:6006/#pytorch_profiler
#
######################################################################
# You should see the Profiler plugin page as shown below.
#
# - Overview
# .. image:: ../../_static/img/profiler_overview1.png
# :scale: 25 %
#
# The overview shows a high-level summary of model performance.
#
# The "GPU Summary" panel shows the GPU configuration and the GPU usage.
# In this example, the GPU Utilization is low.
# The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_.
#
# The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution.
# In this example, you can see the ``DataLoader`` overhead is significant.
#
# The bottom "Performance Recommendation" uses the profiling data
# to automatically highlight likely bottlenecks,
# and gives you actionable optimization suggestions.
#
# You can change the view page in the left "Views" dropdown list.
#
# .. image:: ../../_static/img/profiler_views_list.png
# :alt:
#
#
# - Operator view
# The operator view displays the performance of every PyTorch operator
# that is executed either on the host or device.
#
# .. image:: ../../_static/img/profiler_operator_view.png
# :scale: 25 %
# The "Self" duration does not include its child operators’ time.
# The "Total" duration includes its child operators’ time.
#
# - View call stack
#   Click the "View Callstack" of an operator, and the operators with the same name but different call stacks will be shown.
#   Then click a "View Callstack" in this sub-table, and the call stack frames will be shown.
#
# .. image:: ../../_static/img/profiler_callstack.png
# :scale: 25 %
#
# If the TensorBoard is launched inside VSCode
# (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_),
# clicking a call stack frame will navigate to the specific code line.
#
# .. image:: ../../_static/img/profiler_vscode.png
# :scale: 25 %
#
#
# - Kernel view
# The GPU kernel view shows all kernels’ time spent on GPU.
#
# .. image:: ../../_static/img/profiler_kernel_view.png
# :scale: 25 %
#     Mean Blocks per SM:
#     Blocks per SM = blocks of this kernel / number of SMs on this GPU.
#     If this number is less than 1, it indicates that the GPU multiprocessors are not fully utilized.
#     "Mean Blocks per SM" is the weighted average over all runs of this kernel name, using each run’s duration as the weight.
#
#     Mean Est. Achieved Occupancy:
#     Est. Achieved Occupancy is defined in this column’s tooltip.
#     For most cases, such as memory-bandwidth-bound kernels, the higher the better.
#     "Mean Est. Achieved Occupancy" is the weighted average over all runs of this kernel name,
#     using each run’s duration as the weight.
#
# - Trace view
#     The trace view shows a timeline of the profiled operators and GPU kernels.
#     You can select it to see the details shown below.
#
# .. image:: ../../_static/img/profiler_trace_view1.png
# :scale: 25 %
#
#     You can move the graph and zoom in/out with the help of the right-side toolbar.
#     The keyboard can also be used to zoom and move around inside the timeline.
#     The ‘w’ and ‘s’ keys zoom in and out centered around the mouse,
#     and the ‘a’ and ‘d’ keys move the timeline left and right.
#     You can hit these keys multiple times until you see a readable representation.
#
#     In this example, we can see that the event prefixed with ``enumerate(DataLoader)`` costs a lot of time,
#     and during most of this period the GPU is idle.
#     This is because the function loads and transforms data on the host side,
#     during which the GPU resources are left unused.
######################################################################
# 5. Improve performance with the help of profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# At the bottom of the "Overview" page, the suggestion in "Performance Recommendation" hints that the bottleneck is the DataLoader.
# The PyTorch DataLoader uses a single process by default.
# You can enable multi-process data loading by setting the ``num_workers`` parameter.
# More details are available `here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_.
#
# In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
# pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
#
# ::
#
# train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
#
######################################################################
# Then let’s choose the recently profiled run in the left "Runs" dropdown list.
#
# .. image:: ../../_static/img/profiler_overview2.png
# :scale: 25 %
#
# From the above view, we can see that the step time is reduced to about 58ms compared with the previous run's 121ms,
# and that the reduction comes mainly from ``DataLoader``.
#
# .. image:: ../../_static/img/profiler_trace_view2.png
# :scale: 25 %
#
# From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced,
# and the GPU utilization is increased.
######################################################################
# 6. Analyze performance with other advanced features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - Memory view
# To profile memory, please add ``profile_memory=True`` in arguments of ``torch.profiler.profile``.
#
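#   A minimal sketch of how the profiling code above would change (the log
#   directory name ``./log/resnet18_memory`` is just an illustrative choice):
#
#   ::
#
#      with torch.profiler.profile(
#              schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
#              on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18_memory'),
#              record_shapes=True,
#              profile_memory=True,
#              with_stack=True
#      ) as prof:
#          for step, batch_data in enumerate(train_loader):
#              if step >= (1 + 1 + 3) * 2:
#                  break
#              train(batch_data)
#              prof.step()
#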
#   Note: Because of the current non-optimized implementation of the PyTorch profiler,
#   enabling ``profile_memory=True`` will take several minutes to finish.
#   To save time, you can try our existing examples first by running:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo
#
# The profiler records all memory allocation/release events during profiling.
# For every specific operator, the plugin aggregates all these memory events inside its life span.
#
# .. image:: ../../_static/img/profiler_memory_view.png
# :scale: 25 %
#
# The memory type can be selected in the "Device" selection box.
# For example, "GPU0" means the following table only shows each operator’s memory usage on GPU 0, not including CPU or other GPUs.
#
# The "Size Increase" sums up all allocation bytes and minus all the memory release bytes.
#
# The "Allocation Size" sums up all allocation bytes without considering the memory release.
#
# - Distributed view
#     The plugin now supports a distributed view for profiling DDP jobs with NCCL as the backend.
#
#     You can try it by using an existing example on Azure:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
#
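#     If you want to generate such traces from your own DDP job, a minimal sketch
#     (assuming the script is launched with ``torchrun`` so that the process-group
#     environment variables are set; ``model``, ``train_loader`` and ``train`` are
#     reused from above, and the log directory name is just an illustrative choice):
#
#     ::
#
#       import os
#       import torch.distributed as dist
#       from torch.nn.parallel import DistributedDataParallel as DDP
#
#       dist.init_process_group(backend="nccl")
#       local_rank = int(os.environ["LOCAL_RANK"])
#       torch.cuda.set_device(local_rank)
#       ddp_model = DDP(model.to(local_rank), device_ids=[local_rank])
#
#       with torch.profiler.profile(
#               schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
#               on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/ddp_example')
#       ) as prof:
#           for step, batch_data in enumerate(train_loader):
#               if step >= 5:
#                   break
#               train(batch_data)  # the training step should use ddp_model instead of model
#               prof.step()
#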
# .. image:: ../../_static/img/profiler_distributed_view.png
# :scale: 25 %
#
# The "Computation/Communication Overview" shows computation/communication ratio and their overlapping degree.
# From this view, User can figure out load balance issue among workers.
# For example, if the computation + overlapping time of one worker is much larger than others,
# there may be a problem of load balance or this worker may be a straggler.
#
# The "Synchronizing/Communication Overview" shows the efficiency of communication.
# "Data Transfer Time" is the time for actual data exchanging.
# "Synchronizing Time" is the time for waiting and synchronizing with other workers.
#
# If one worker’s "Synchronizing Time" is much shorter than that of the other workers,
# this worker may be a straggler, carrying more computation workload than the others.
#
# The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker.
######################################################################
# Learn More
# ----------
#
# Take a look at the following documents to continue your learning,
# and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_.
#
# - `PyTorch TensorBoard Profiler GitHub <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_
# - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_
| 40.758621 | 150 | 0.666314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,613 | 0.886865 |
403352816f5874a59e3b9fffa9b383a34c03d749 | 311 | py | Python | imgtoch/__init__.py | hrpzcf/imgtoch | 13b59dd4c6b65b8ee17bbd22ac1133a86d34d5fb | [
"MIT"
]
| null | null | null | imgtoch/__init__.py | hrpzcf/imgtoch | 13b59dd4c6b65b8ee17bbd22ac1133a86d34d5fb | [
"MIT"
]
| null | null | null | imgtoch/__init__.py | hrpzcf/imgtoch | 13b59dd4c6b65b8ee17bbd22ac1133a86d34d5fb | [
"MIT"
]
| null | null | null | # coding: utf-8
from .__utils__ import grayscaleOf, makeImage, sortByGrayscale
NAME = "imgtoch"
VERSIONNUM = 0, 2, 3
VERSION = ".".join(map(str, VERSIONNUM))
AUTHOR = "hrpzcf"
EMAIL = "[email protected]"
WEBSITE = "https://gitee.com/hrpzcf/imgtoch"
__all__ = ["grayscaleOf", "makeImage", "sortByGrayscale"]
| 23.923077 | 62 | 0.717042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.418006 |
40339ee3fc200a5b40a0b837adca77cf33b0c95c | 4,298 | py | Python | packages/gradient_boosting_model/gradient_boosting_model/processing/validation.py | g-nightingale/testing-and-monitoring-ml-deployments | 770d2889968e7195dba1697c164b3344cff3c5ee | [
"BSD-3-Clause"
]
| 99 | 2019-11-14T11:58:51.000Z | 2022-03-19T14:23:17.000Z | packages/gradient_boosting_model/gradient_boosting_model/processing/validation.py | hoai-nguyen/testing-and-monitoring-ml-deployments | c4c0bc8d857326cc10899be6fe7c5bb03586347c | [
"BSD-3-Clause"
]
| 1 | 2020-03-05T04:08:26.000Z | 2020-03-05T04:08:26.000Z | packages/gradient_boosting_model/gradient_boosting_model/processing/validation.py | hoai-nguyen/testing-and-monitoring-ml-deployments | c4c0bc8d857326cc10899be6fe7c5bb03586347c | [
"BSD-3-Clause"
]
| 188 | 2019-12-13T16:48:23.000Z | 2022-03-29T09:25:12.000Z | import typing as t
from gradient_boosting_model.config.core import config
import numpy as np
import pandas as pd
from marshmallow import fields, Schema, ValidationError
class HouseDataInputSchema(Schema):
Alley = fields.Str(allow_none=True)
BedroomAbvGr = fields.Integer()
BldgType = fields.Str()
BsmtCond = fields.Str(allow_none=True)
BsmtExposure = fields.Str(allow_none=True)
BsmtFinSF1 = fields.Float(allow_none=True)
BsmtFinSF2 = fields.Float(allow_none=True)
BsmtFinType1 = fields.Str(allow_none=True)
BsmtFinType2 = fields.Str(allow_none=True)
BsmtFullBath = fields.Float(allow_none=True)
BsmtHalfBath = fields.Float(allow_none=True)
BsmtQual = fields.Str(allow_none=True)
BsmtUnfSF = fields.Float()
CentralAir = fields.Str()
Condition1 = fields.Str()
Condition2 = fields.Str()
Electrical = fields.Str(allow_none=True)
EnclosedPorch = fields.Integer()
ExterCond = fields.Str()
ExterQual = fields.Str()
Exterior1st = fields.Str(allow_none=True)
Exterior2nd = fields.Str(allow_none=True)
Fence = fields.Str(allow_none=True)
FireplaceQu = fields.Str(allow_none=True)
Fireplaces = fields.Integer()
Foundation = fields.Str()
FullBath = fields.Integer()
Functional = fields.Str(allow_none=True)
GarageArea = fields.Float()
GarageCars = fields.Float()
GarageCond = fields.Str(allow_none=True)
GarageFinish = fields.Str(allow_none=True)
GarageQual = fields.Str(allow_none=True)
GarageType = fields.Str(allow_none=True)
GarageYrBlt = fields.Float(allow_none=True)
GrLivArea = fields.Integer()
HalfBath = fields.Integer()
Heating = fields.Str()
HeatingQC = fields.Str()
HouseStyle = fields.Str()
Id = fields.Integer()
KitchenAbvGr = fields.Integer()
KitchenQual = fields.Str(allow_none=True)
LandContour = fields.Str()
LandSlope = fields.Str()
LotArea = fields.Integer()
LotConfig = fields.Str()
LotFrontage = fields.Float(allow_none=True)
LotShape = fields.Str()
LowQualFinSF = fields.Integer()
MSSubClass = fields.Integer()
MSZoning = fields.Str(allow_none=True)
MasVnrArea = fields.Float(allow_none=True)
MasVnrType = fields.Str(allow_none=True)
MiscFeature = fields.Str(allow_none=True)
MiscVal = fields.Integer()
MoSold = fields.Integer()
Neighborhood = fields.Str()
OpenPorchSF = fields.Integer()
OverallCond = fields.Integer()
OverallQual = fields.Integer()
PavedDrive = fields.Str()
PoolArea = fields.Integer()
PoolQC = fields.Str(allow_none=True)
RoofMatl = fields.Str()
RoofStyle = fields.Str()
SaleCondition = fields.Str()
SaleType = fields.Str(allow_none=True)
ScreenPorch = fields.Integer()
Street = fields.Str()
TotRmsAbvGrd = fields.Integer()
TotalBsmtSF = fields.Float()
Utilities = fields.Str(allow_none=True)
WoodDeckSF = fields.Integer()
YearBuilt = fields.Integer()
YearRemodAdd = fields.Integer()
YrSold = fields.Integer()
FirstFlrSF = fields.Integer()
SecondFlrSF = fields.Integer()
ThreeSsnPortch = fields.Integer()
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model inputs for na values and filter."""
validated_data = input_data.copy()
if input_data[config.model_config.numerical_na_not_allowed].isnull().any().any():
validated_data = validated_data.dropna(
axis=0, subset=config.model_config.numerical_na_not_allowed
)
return validated_data
def validate_inputs(
*, input_data: pd.DataFrame
) -> t.Tuple[pd.DataFrame, t.Optional[dict]]:
"""Check model inputs for unprocessable values."""
# convert syntax error field names (beginning with numbers)
input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
validated_data = drop_na_inputs(input_data=input_data)
# set many=True to allow passing in a list
schema = HouseDataInputSchema(many=True)
errors = None
try:
# replace numpy nans so that Marshmallow can validate
schema.load(validated_data.replace({np.nan: None}).to_dict(orient="records"))
except ValidationError as exc:
errors = exc.messages
return validated_data, errors
| 34.66129 | 85 | 0.704281 | 2,986 | 0.694742 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.061191 |
403486f59aaf160172f092701ccd24e42088b089 | 2,458 | py | Python | pyplan_engine/classes/IOEngine.py | jorgedouglas71/pyplan-ide | 5ad0e4a2592b5f2716ff680018f717c65de140f5 | [
"MIT"
]
| 17 | 2019-12-04T19:22:19.000Z | 2021-07-28T11:17:05.000Z | pyplan_engine/classes/IOEngine.py | jorgedouglas71/pyplan-ide | 5ad0e4a2592b5f2716ff680018f717c65de140f5 | [
"MIT"
]
| 9 | 2019-12-13T15:34:43.000Z | 2022-02-10T11:43:00.000Z | pyplan_engine/classes/IOEngine.py | jorgedouglas71/pyplan-ide | 5ad0e4a2592b5f2716ff680018f717c65de140f5 | [
"MIT"
]
| 5 | 2019-12-04T15:57:06.000Z | 2021-08-20T19:59:26.000Z |
class IOEngine(object):
def __init__(self, node):
self.node = node
self.inputs = []
self.outputs = []
def release(self):
self.inputs = None
self.outputs = None
self.node = None
def updateInputs(self, names):
# remove prior outputs
for inputNode in self.inputs:
            if inputNode not in names:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
newInputs = []
for nodeId in names:
if self.node.model.existNode(nodeId):
newInputs.append(nodeId)
                if nodeId not in self.inputs:
self.node.model.getNode(nodeId).ioEngine.addOutput(
self.node.identifier)
self.inputs = newInputs
def removeOutput(self, nodeId):
if nodeId in self.outputs:
self.outputs.remove(nodeId)
def removeInput(self, nodeId):
if nodeId in self.inputs:
self.inputs.remove(nodeId)
def addOutput(self, nodeId):
self.outputs.append(nodeId)
def updateNodeId(self, oldId, newId):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(
inputNode).ioEngine.updateOutputId(oldId, newId)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(
outputNode).ioEngine.updateInputId(oldId, newId)
def updateOnDeleteNode(self):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(outputNode).ioEngine.removeInput(
self.node.identifier)
def updateOutputId(self, oldId, newId):
if oldId in self.outputs:
self.outputs.remove(oldId)
self.outputs.append(newId)
def updateInputId(self, oldId, newId):
if oldId in self.inputs:
self.inputs.remove(oldId)
self.inputs.append(newId)
self.node.updateDefinitionForChangeId(oldId, newId)
| 32.342105 | 77 | 0.58869 | 2,456 | 0.999186 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.00895 |
4035dbde81734e9262f7a5d9f7fcf21b0a2fc083 | 1,006 | py | Python | RLBotPack/JoeyBot/CSharpPythonAgent/CSharpPythonAgent.py | RLMarvin/RLBotPack | c88c4111bf67d324b471ad87ad962e7bc8c2a202 | [
"MIT"
]
| 13 | 2019-05-25T20:25:51.000Z | 2022-03-19T13:36:23.000Z | RLBotPack/JoeyBot/CSharpPythonAgent/CSharpPythonAgent.py | RLMarvin/RLBotPack | c88c4111bf67d324b471ad87ad962e7bc8c2a202 | [
"MIT"
]
| 53 | 2019-06-07T13:31:59.000Z | 2022-03-28T22:53:47.000Z | RLBotPack/JoeyBot/CSharpPythonAgent/CSharpPythonAgent.py | RLMarvin/RLBotPack | c88c4111bf67d324b471ad87ad962e7bc8c2a202 | [
"MIT"
]
| 78 | 2019-06-30T08:42:13.000Z | 2022-03-23T20:11:42.000Z | import os
from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.base_dotnet_agent import BaseDotNetAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class DotNetBot(BaseDotNetAgent):
def get_port_file_path(self):
# Look for a port.cfg file in the same directory as THIS python file.
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__), 'port.cfg'))
def load_config(self, config_header: ConfigHeader):
self.dotnet_executable_path = config_header.getpath('dotnet_executable_path')
self.logger.info(".NET executable is configured as {}".format(self.dotnet_executable_path))
@staticmethod
def create_agent_configurations(config: ConfigObject):
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('dotnet_executable_path', str, default=None,
description='Relative path to the executable that runs the .NET executable.')
| 43.73913 | 102 | 0.744533 | 806 | 0.801193 | 0 | 0 | 305 | 0.303181 | 0 | 0 | 228 | 0.22664 |
4036ce0b3a0763152669516459e91450d4954edb | 2,640 | py | Python | v3_experiments.py | runekaagaard/workflows | 7bb7fe3821bc33b5e82c65dda3ca61f69ee8bcfa | [
"Unlicense"
]
| null | null | null | v3_experiments.py | runekaagaard/workflows | 7bb7fe3821bc33b5e82c65dda3ca61f69ee8bcfa | [
"Unlicense"
]
| null | null | null | v3_experiments.py | runekaagaard/workflows | 7bb7fe3821bc33b5e82c65dda3ca61f69ee8bcfa | [
"Unlicense"
]
| null | null | null | # coding=utf-8
import inspect
from functools import wraps
def listify(func_s):
if callable(func_s):
return [func_s]
else:
return func_s
def parse_conditions(condition_s, args, kwargs, title):
err_msg = unicode(title) + u" nr. {} failed: {}"
for i, condition in enumerate(listify(condition_s), 1):
assert condition(*args, **
kwargs) is not False, unicode(err_msg).format(
i, unicode(inspect.getsource(condition)))
def mark_takes_no_arguments(func):
func.takes_no_arguments = True
return func
def takes_no_arguments(func):
mark_takes_no_arguments(func)
return func
def contract(pre_conditions, post_conditions):
"""
Pre is before. Post is after.
"""
def _(func):
@wraps(func)
def __(*args, **kwargs):
parse_conditions(
pre_conditions, args, kwargs, title='Preconditions')
result = func(*args, **kwargs)
parse_conditions(
post_conditions, [result], {}, title='Postconditions')
return result
return __
return _
def processing(pre_process, post_process):
"Procemanns"
def _(func):
@wraps(func)
def __(*args, **kwargs):
args, kwargs = pre_process(*args, **kwargs)
return post_process(func(*args, **kwargs))
return __
return _
@takes_no_arguments
def add_one(func):
@wraps(func)
def _(*args, **kwargs):
return func(*args, **kwargs) + 1
return _
def compose(*workflows):
def extract_kwargs(workflow, kwargs):
return {x: kwargs[x] for x in inspect.getargspec(workflow).args}
def _(*args, **kwargs):
assert len(args) == 0, "Only keywords allowed."
def __(func):
@wraps(func)
def ___(*a, **k):
return func(*a, **k)
for workflow in reversed(workflows):
if hasattr(workflow, 'takes_no_arguments'):
___ = workflow(___)
else:
___ = workflow(**extract_kwargs(workflow, kwargs))(___)
                ___.__doc__ = (___.__doc__ or "") + (workflow.__doc__ or "")
return ___
return __
return _
someworkflow = compose(contract, processing, add_one)
print someworkflow
@someworkflow(
pre_conditions=[lambda x: x == 2],
post_conditions=lambda r: r == 15,
pre_process=lambda x: ([x + 1], {}),
post_process=lambda x: x + 1, )
def somefunc(x):
"""
Very important: x must be 2!
"""
return x + 10
print somefunc(2)
help(somefunc)
| 22.372881 | 75 | 0.576894 | 0 | 0 | 0 | 0 | 940 | 0.356061 | 0 | 0 | 213 | 0.080682 |
4037b08c119c1be84f8a39d7cd954a0ebc06a052 | 1,198 | py | Python | externals/binaryen/test/emscripten/tools/distill_asm.py | caokun8008/ckeos | 889093599eb59c90e4cbcff2817f4421302fada1 | [
"MIT"
]
| 40 | 2018-05-14T11:05:03.000Z | 2020-10-20T03:03:06.000Z | externals/binaryen/test/emscripten/tools/distill_asm.py | caokun8008/ckeos | 889093599eb59c90e4cbcff2817f4421302fada1 | [
"MIT"
]
| 4 | 2019-08-19T13:07:10.000Z | 2020-10-17T02:45:04.000Z | externals/binaryen/test/emscripten/tools/distill_asm.py | caokun8008/ckeos | 889093599eb59c90e4cbcff2817f4421302fada1 | [
"MIT"
]
| 14 | 2018-05-28T09:45:02.000Z | 2018-12-18T10:54:26.000Z | '''
Gets the core asm module out of an emscripten output file.
By default it adds a ';' to end the
var asm = ...
statement. You can add a third param to customize that. If the third param is 'swap-in', it will emit code to swap this asm module in, instead of the default one.
XXX this probably doesn't work with closure compiler advanced yet XXX
'''
import os, sys
import asm_module
infile = sys.argv[1]
outfile = sys.argv[2]
extra = sys.argv[3] if len(sys.argv) >= 4 else ';'
module = asm_module.AsmModule(infile).asm_js
if extra == 'swap-in':
# we do |var asm = | just like the original codebase, so that gets overridden anyhow (assuming global scripts).
extra = r''' (Module.asmGlobalArg, Module.asmLibraryArg, Module['buffer']);
// special fixups
asm.stackRestore(Module['asm'].stackSave()); // if this fails, make sure the original was built to be swappable (-s SWAPPABLE_ASM_MODULE=1)
// Finish swap
Module['asm'] = asm;
if (Module['onAsmSwap']) Module['onAsmSwap']();
'''
elif extra == 'just-func':
module = module[module.find('=')+1:] # strip the initial "var asm =" bit, leave just the raw module as a function
extra = ';'
open(outfile, 'w').write(module + extra)
| 32.378378 | 162 | 0.69783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 892 | 0.744574 |
4037b4c2546a2c9d2335471a4c5869528e8d4f28 | 2,399 | py | Python | apex/contrib/conv_bias_relu/conv_bias_relu.py | XL-Kong/Painter_GAN | 23cfb57638497fdd1f2d8c09728b439b0e83efde | [
"BSD-3-Clause"
]
| null | null | null | apex/contrib/conv_bias_relu/conv_bias_relu.py | XL-Kong/Painter_GAN | 23cfb57638497fdd1f2d8c09728b439b0e83efde | [
"BSD-3-Clause"
]
| null | null | null | apex/contrib/conv_bias_relu/conv_bias_relu.py | XL-Kong/Painter_GAN | 23cfb57638497fdd1f2d8c09728b439b0e83efde | [
"BSD-3-Clause"
]
| null | null | null | import torch
import pdb
from torch.autograd import gradcheck
import fused_conv_bias_relu
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
class ConvBiasMaskReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, mask, padding, stride):
outputs = fused_conv_bias_relu.forward_mask([x, weight, bias, mask], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None, None
class ConvBias_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward_no_relu([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight)
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward_no_relu(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
ConvBiasReLU = ConvBiasReLU_.apply
ConvBiasMaskReLU = ConvBiasMaskReLU_.apply
ConvBias = ConvBias_.apply
| 31.155844 | 93 | 0.681951 | 2,193 | 0.914131 | 0 | 0 | 2,025 | 0.844102 | 0 | 0 | 0 | 0 |
403a8f16077fbf7702c11e3631833751b1f7fb2c | 3,367 | py | Python | data-structures/trees/trees/trees.py | bayan-alkhatib/data-structures-and-algorithms-401 | c22e0adf159392a8925f07a36b3083654d3a7002 | [
"MIT"
]
| null | null | null | data-structures/trees/trees/trees.py | bayan-alkhatib/data-structures-and-algorithms-401 | c22e0adf159392a8925f07a36b3083654d3a7002 | [
"MIT"
]
| 11 | 2021-06-14T00:14:54.000Z | 2021-06-30T00:08:30.000Z | data-structures/trees/trees/trees.py | bayan-alkhatib/data-structures-and-algorithms-401 | c22e0adf159392a8925f07a36b3083654d3a7002 | [
"MIT"
]
| 1 | 2021-07-26T11:08:29.000Z | 2021-07-26T11:08:29.000Z | class Node:
def __init__(self,value):
self.value=value
self.left=None
self.right=None
class Binary_Tree:
def __init__(self):
self.root = None
def pre_order(self):
""" root-left-right """
try:
self.values=[]
if self.root == None:
return "Tree is Empty"
def tree(node):
self.values+=[node.value]
if node.left:
tree(node.left)
if node.right:
tree(node.right)
return self.values
return tree(self.root)
except:
return "Error"
def in_order(self):
""" left-node-right"""
try:
self.values=[]
if not self.root:
return "Tree is Empty"
def tree(node):
if node.left:
tree(node.left)
self.values+=[node.value]
if node.right:
tree(node.right)
return self.values
return tree(self.root)
except:
return "Error"
def post_order(self):
""" left-right-node"""
try:
self.values=[]
if not self.root:
return "Tree is Empty"
def tree(node):
if node.left:
tree(node.left)
if node.right:
tree(node.right)
self.values+=[node.value]
return self.values
return tree(self.root)
except:
return "Error"
    def max(self):
        """Return the maximum value stored in the tree."""
        if not self.root:
            return "Tree is Empty"
        # Use a dedicated attribute so we don't shadow (and break) this method.
        self.max_value = self.root.value
        def tree(node):
            if node.value > self.max_value:
                self.max_value = node.value
            if node.left:
                tree(node.left)
            if node.right:
                tree(node.right)
            return self.max_value
        return tree(self.root)
class Binary_Search_Tree(Binary_Tree):
def add(self,value):
        '''Add a value to the binary search tree.'''
if self.root == None:
self.root = Node(value)
else:
current=self.root
while current:
if value < current.value :
if current.left == None:
current.left = Node(value)
break
current = current.left
else:
if current.right == None:
current.right = Node(value)
break
current = current.right
def Contains(self,value):
if self.root==None:
return 'Tree is Empty'
else:
current=self.root
while current:
if current.value==value:
return True
elif value < current.value :
if current.left == None:
return False
current = current.left
else:
if current.right == None:
return False
current = current.right
| 24.223022 | 51 | 0.415206 | 3,337 | 0.99109 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.057618 |
403ab8cc728f6138166c183502ef116ca738da28 | 3,037 | py | Python | ironic_inspector/cmd/dbsync.py | namnx228/ironic-inspector | fb5955bccef367af58c972718643fe5fdb18ffa5 | [
"Apache-2.0"
]
| 31 | 2015-06-23T08:06:05.000Z | 2021-11-20T05:34:32.000Z | ironic_inspector/cmd/dbsync.py | sapcc/ironic-inspector | dee8734f8ca2b0fb0acc4c56f1806237234bf55d | [
"Apache-2.0"
]
| 1 | 2019-11-22T12:07:56.000Z | 2019-11-22T12:07:59.000Z | ironic_inspector/cmd/dbsync.py | sapcc/ironic-inspector | dee8734f8ca2b0fb0acc4c56f1806237234bf55d | [
"Apache-2.0"
]
| 33 | 2015-12-02T05:27:56.000Z | 2022-02-28T07:57:43.000Z | # Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo_config import cfg
from oslo_log import log
from ironic_inspector import conf # noqa
CONF = cfg.CONF
def add_alembic_command(subparsers, name):
return subparsers.add_parser(
name, help=getattr(alembic_command, name).__doc__)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches', 'heads']:
parser = add_alembic_command(subparsers, name)
parser.set_defaults(func=do_alembic_command)
for name in ['stamp', 'show', 'edit']:
parser = add_alembic_command(subparsers, name)
parser.set_defaults(func=with_revision)
parser.add_argument('--revision', nargs='?', required=True)
parser = add_alembic_command(subparsers, 'upgrade')
parser.set_defaults(func=with_revision)
parser.add_argument('--revision', nargs='?')
parser = add_alembic_command(subparsers, 'revision')
parser.set_defaults(func=do_revision)
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
command_opt = cfg.SubCommandOpt('command',
title='Command',
help='Available commands',
handler=add_command_parsers)
def _get_alembic_config():
base_path = os.path.split(os.path.dirname(__file__))[0]
return alembic_config.Config(os.path.join(base_path, 'alembic.ini'))
def do_revision(config, cmd, *args, **kwargs):
do_alembic_command(config, cmd, message=CONF.command.message,
autogenerate=CONF.command.autogenerate)
def with_revision(config, cmd, *args, **kwargs):
revision = CONF.command.revision or 'head'
do_alembic_command(config, cmd, revision)
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def main(args=sys.argv[1:]):
log.register_options(CONF)
CONF.register_cli_opt(command_opt)
CONF(args, project='ironic-inspector')
config = _get_alembic_config()
config.set_main_option('script_location', "ironic_inspector:migrations")
config.ironic_inspector_config = CONF
CONF.command.func(config, CONF.command.name)
| 33.01087 | 76 | 0.709582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.283174 |
403ac1f41e289fbd9825b8c92a8b0c154ef6090e | 1,300 | py | Python | trabalhoaqui/comp_perguntas/valida.py | EmanoelG/jogodaforca | 06baf78b31e4b40d8db9fc5be67700be32c66cba | [
"MIT"
]
| 1 | 2020-06-06T17:09:55.000Z | 2020-06-06T17:09:55.000Z | trabalhoaqui/comp_perguntas/valida.py | EmanoelG/jogodaforca | 06baf78b31e4b40d8db9fc5be67700be32c66cba | [
"MIT"
]
| null | null | null | trabalhoaqui/comp_perguntas/valida.py | EmanoelG/jogodaforca | 06baf78b31e4b40d8db9fc5be67700be32c66cba | [
"MIT"
]
| null | null | null | from jogo import desenha_jogo
from random import randint
import sys
def input_cria_usuario():
usuario = dict()
usuario['nome'] = input('Informe o seu nome: ')
usuario['pontos'] = 0
usuario['desafiado'] = False
return usuario
def comeco(j1, j2):
j1 = 1
j2 = 2
n= randint(j1,j2)
escolhildo = n
return escolhildo
# mexi a aqui
def completou(acertos, pala , jogador_adivinhao):#recebe as letras acertadass e depois verifica se a palavra esta completa
if acertos == len(pala):## e aqui
print(f'\t\t\t\t\t \033[37mJogador >> {jogador_adivinhao} << venceu !\033[m')
print("""
\033[35m
_____ ___ ___ ___ _______
/ ___| / | / |/ | | ____|
| | / | / /| /| | | |__
| | _ / /| | / / |__/ | | | __|
| |_| | / ___ | / / | | | |____
\_____//_/ |_| /_/ |_| |_______|
_____ _ _ ______ ______
/ _ \ | | / / | _____| | _ |
| | | | | | / / | |__ | |_| |
| | | | | | / / | __| | _ /
| |_| | | |/ / | |____ | | \ |
\_____/ |___/ |______| |_| \_|\033[m
""")
| 23.214286 | 127 | 0.412308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 840 | 0.646154 |
403b3bdafa5f824c48528757629f8e664b7cbcd3 | 9,018 | py | Python | DesksReminder/Desks/accounts_desk.py | flopezag/fiware-management-scripts | 3e9ccdb62a11ec0ffd0747511f5512bcdb0df729 | [
"Apache-2.0"
]
| null | null | null | DesksReminder/Desks/accounts_desk.py | flopezag/fiware-management-scripts | 3e9ccdb62a11ec0ffd0747511f5512bcdb0df729 | [
"Apache-2.0"
]
| 21 | 2017-01-17T12:19:47.000Z | 2021-06-03T07:56:56.000Z | DesksReminder/Desks/accounts_desk.py | flopezag/fiware-management-scripts | 3e9ccdb62a11ec0ffd0747511f5512bcdb0df729 | [
"Apache-2.0"
]
| 1 | 2017-05-03T21:42:49.000Z | 2017-05-03T21:42:49.000Z | from datetime import date, datetime
from DesksReminder.Basics.dataFinder import Data
from DesksReminder.Basics.nickNames import ContactBook
from Config.settings import JIRA_URL
__author__ = 'Manuel Escriche'
class AccountsDesk:
def __init__(self):
self.contactBook = ContactBook()
def open(self):
messages = list()
for issue in Data().getAccountsDeskOpen():
created = datetime.strptime(issue.fields.created[:10], '%Y-%m-%d').date()
unanswered = (date.today() - created).days
if unanswered <= 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk : Open Issue'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed the issue {} is still OPEN, i.e. not replied for {} days.".format(issue, unanswered) +\
"\nLet me remind you of our rule to reply in the first 24 hours during working days." +\
"\nI would appreciate you spent a minute to reply to this request and to progress it " \
"on its workflow." +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def inProgress(self):
messages = list()
for issue in Data().getAccountsDeskInProgress():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is In Progress but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for analysing, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def scheduled(self):
messages = list()
for issue in Data().getAccountsDeskScheduled():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is Scheduled but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for Answered, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def answered(self):
messages = list()
for issue in Data().getAccountsDeskAnswered():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Closed Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Answered but no update happened in the " \
"last {} days.".format(issue, noupdated) +\
"\nI would appreciate you spent a minute to close it" \
"\n\tor if the exchange continues, please, update its progress in a comment" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def rejected(self):
messages = list()
for issue in Data().getAccountsDeskRejected():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Close the procedure'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Rejected.".format(issue) +\
"\nI would appreciate you spent a minute to close the procedure" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
if __name__ == "__main__":
pass
| 49.01087 | 120 | 0.555001 | 8,768 | 0.972278 | 0 | 0 | 0 | 0 | 0 | 0 | 2,703 | 0.299734 |
403c902e2dd03cc231fcbd2349b64917b93e7dde | 826 | py | Python | scripts/ip2hex.py | Kidlike/dotfiles | b9c4daa4da1f416662b708338a497b5a620ddcbf | [
"Apache-2.0"
]
| null | null | null | scripts/ip2hex.py | Kidlike/dotfiles | b9c4daa4da1f416662b708338a497b5a620ddcbf | [
"Apache-2.0"
]
| null | null | null | scripts/ip2hex.py | Kidlike/dotfiles | b9c4daa4da1f416662b708338a497b5a620ddcbf | [
"Apache-2.0"
]
| 1 | 2018-05-28T08:08:25.000Z | 2018-05-28T08:08:25.000Z | #!/usr/bin/python
import sys
import re
def iptohex(ip):
octets = ip.split('.')
hex_octets = []
for octet in octets:
if int(octet) < 16:
hex_octets.append('0' + hex(int(octet))[2:])
else:
hex_octets.append(hex(int(octet))[2:])
hex_octets = ''.join(hex_octets)
return hex_octets
def main():
if (len(sys.argv) != 2):
        print 'Usage: ./ip2hex.py x.x.x.x'
        sys.exit(1)
    ip = sys.argv[1]
    invalidInput = re.search(r'[^0-9\.]', ip)
    if invalidInput:
        print 'Usage: ./ip2hex.py x.x.x.x'
        sys.exit(1)
hex_ip = iptohex(ip)
print "Hex IP: %s " % (hex_ip)
print "Decimal IP: %s" % (ip)
if __name__ == '__main__':
main()
| 26.645161 | 68 | 0.468523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.161017 |
403cacc3c31596cf185f47bf3504df89608d6f14 | 1,329 | py | Python | src/models/CVX_weighted.py | DanqingZ/social-DCM | 3c2541a7ed0e7f4519d97783b5b673fa6c06ae94 | [
"MIT"
]
| 14 | 2017-08-10T17:00:20.000Z | 2021-12-23T09:00:50.000Z | src/models/CVX_weighted.py | DanqingZ/social-DCM | 3c2541a7ed0e7f4519d97783b5b673fa6c06ae94 | [
"MIT"
]
| null | null | null | src/models/CVX_weighted.py | DanqingZ/social-DCM | 3c2541a7ed0e7f4519d97783b5b673fa6c06ae94 | [
"MIT"
]
| 1 | 2019-08-13T08:47:43.000Z | 2019-08-13T08:47:43.000Z | import random
import numpy as np
import numpy.linalg as LA
import scipy as spy
import time
from itertools import *
import sys
import cvxpy as cvx
from random import randint
import numpy as np
import random
from scipy.sparse import csc_matrix
from scipy import sparse as sp
import networkx as nx
class CVX_weighted:
def __init__(self, X, y, b,pos_node ,temp, Lambda, Rho):
self.X = X
self.y = y
self.value = 0
self.dim = X.shape[1]
self.Lambda = Lambda
self.Rho = Rho
self.temp = temp
self.num_nodes = nx.number_of_nodes(self.temp)
self.W = np.zeros((self.dim))
self.b = b
self.pos_node = pos_node
self.P = np.zeros((self.num_nodes,self.num_nodes))
def init_P(self):
for i in self.temp.nodes_iter():
for j in self.temp.neighbors(i):
self.P[i,j] = self.temp[i][j]['pos_edge_prob']
self.P = np.diag(np.sum(self.P,1)) - self.P
def solve(self):
dim = self.X.shape[1]
w = cvx.Variable(dim)
num_nodes = nx.number_of_nodes(self.temp)
b = cvx.Variable(num_nodes)
loss = cvx.sum_entries(cvx.mul_elemwise(np.array(self.pos_node),cvx.logistic(-cvx.mul_elemwise(self.y, self.X*w+b)))) + self.Lambda*cvx.quad_form(b,self.P)
problem = cvx.Problem(cvx.Minimize(loss))
problem.solve(verbose=False)
opt = problem.value
self.W = w.value
self.b = b.value
self.value = opt | 26.58 | 157 | 0.699774 | 1,033 | 0.777276 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.011287 |
403ceb47a5257374ece3af5ee6603178afb5bfd2 | 5,704 | py | Python | experiments/colorization_cINN/data.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | [
"MIT"
]
| null | null | null | experiments/colorization_cINN/data.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | [
"MIT"
]
| null | null | null | experiments/colorization_cINN/data.py | jlmaccal/FrEIA | 64a04cb784e19bdff69546657f602fd31835c21f | [
"MIT"
]
| null | null | null | import sys
import glob
from os.path import join
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color
from PIL import Image, ImageEnhance
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
import torchvision.transforms as T
from tqdm import tqdm
import joint_bilateral_filter as jbf
import config as c
offsets = (47.5, 2.4, 7.4)
scales = (25.6, 11.2, 16.8)
def apply_filt(args):
'''multiprocessing wrapper for applying the joint bilateral filter'''
L_i, ab_i = args
return jbf.upsample(L_i[0], ab_i, s_x=6, s_l=0.10)
def norm_lab_to_rgb(L, ab, norm=True, filt=False, bw=False):
    '''Given an Nx1xWxH Tensor L and an Nx2xwxh Tensor ab, normalized according to offsets and
scales above, upsample the ab channels and combine with L, and form an RGB image.
norm: If false, assume that L, ab are not normalized and already in the correct range
    filt: Use joint bilateral upsampling to do the upsampling. Slow, but improves image quality.
bw: Simply produce a grayscale RGB, ignoring the ab channels'''
if bw:
filt=False
if filt:
with Pool(12) as p:
ab_up_list = p.map(apply_filt, [(L[i], ab[i]) for i in range(len(L))])
ab = np.stack(ab_up_list, axis=0)
ab = torch.Tensor(ab)
else:
ab = F.interpolate(ab, size=L.shape[2], mode='bilinear')
lab = torch.cat([L, ab], dim=1)
for i in range(1 + 2*norm):
lab[:, i] = lab[:, i] * scales[i] + offsets[i]
lab[:, 0].clamp_(0., 100.)
lab[:, 1:].clamp_(-128, 128)
if bw:
lab[:, 1:].zero_()
lab = lab.cpu().data.numpy()
rgb = [color.lab2rgb(np.transpose(l, (1, 2, 0))).transpose(2, 0, 1) for l in lab]
return np.array(rgb)
class LabColorDataset(Dataset):
def __init__(self, file_list, transform=None):
self.files = file_list
self.transform = transform
self.to_tensor = T.ToTensor()
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
im = Image.open(self.files[idx])
if self.transform:
im = self.transform(im)
im = self.to_tensor(im).numpy()
try:
if im.shape[0] == 1:
im = np.concatenate([im]*3, axis=0)
if im.shape[0] == 4:
im = im[:3]
im = np.transpose(im, (1,2,0))
im = color.rgb2lab(im).transpose((2, 0, 1))
for i in range(3):
im[i] = (im[i] - offsets[i]) / scales[i]
return torch.Tensor(im)
except:
return self.__getitem__(idx+1)
# Data transforms for training and test/validation set
transf = T.Compose([T.RandomHorizontalFlip(),
T.RandomResizedCrop(c.img_dims_orig[0], scale=(0.2, 1.))])
transf_test = T.Compose([T.Resize(c.img_dims_orig[0]),
T.CenterCrop(c.img_dims_orig[0])])
if c.dataset == 'imagenet':
with open('./imagenet/training_images.txt') as f:
train_list = [join('./imagenet', fname[2:]) for fname in f.read().splitlines()]
with open(c.validation_images) as f:
test_list = [ t for t in f.read().splitlines()if t[0] != '#']
test_list = [join('./imagenet', fname) for fname in test_list]
if c.val_start is not None:
test_list = test_list[c.val_start:c.val_stop]
else:
data_dir = '/home/diz/data/coco17'
complete_list = sorted(glob.glob(join(data_dir, '*.jpg')))
train_list = complete_list[64:]
test_list = complete_list[64:]
train_data = LabColorDataset(train_list,transf)
test_data = LabColorDataset(test_list, transf_test)
train_loader = DataLoader(train_data, batch_size=c.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
test_loader = DataLoader(test_data, batch_size=min(64, len(test_list)), shuffle=c.shuffle_val, num_workers=4, pin_memory=True, drop_last=False)
if __name__ == '__main__':
# Determine mean and standard deviation of RGB channels
# (i.e. set global variables scale and offsets to 1., then use the results as new scale and offset)
for x in test_loader:
x_l, x_ab, _, x_ab_pred = model.prepare_batch(x)
#continue
img_gt = norm_lab_to_rgb(x_l, x_ab)
img_pred = norm_lab_to_rgb(x_l, x_ab_pred)
for i in range(c.batch_size):
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img_gt[i].transpose(1,2,0))
plt.subplot(2,2,2)
plt.scatter(x_ab[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='gt')
plt.scatter(x_ab_pred[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab_pred[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='pred')
plt.legend()
plt.subplot(2,2,3)
plt.imshow(img_pred[i].transpose(1,2,0))
plt.show()
sys.exit()
means = []
stds = []
for i, x in enumerate(train_loader):
print('\r', '%i / %i' % (i, len(train_loader)), end='')
mean = []
std = []
for i in range(3):
mean.append(x[:, i].mean().item())
std.append(x[:, i].std().item())
means.append(mean)
stds.append(std)
if i >= 1000:
break
means, stds = np.array(means), np.array(stds)
print()
print('Mean ', means.mean(axis=0))
print('Std dev', stds.mean(axis=0))
#[-0.04959071 0.03768991 0.11539354]
#[0.51175581 0.17507738 0.26179135]
| 32.971098 | 144 | 0.603086 | 864 | 0.151473 | 0 | 0 | 0 | 0 | 0 | 0 | 959 | 0.168128 |
403d3f7c3cad2d68df2456deb94e9f014798faf1 | 16,215 | py | Python | utils/editor.py | tien1504/idinvert_pytorch | 19999e9945aef4843a464930426a565256863ded | [
"MIT"
]
| 415 | 2020-04-02T03:06:47.000Z | 2022-03-28T09:32:13.000Z | utils/editor.py | tien1504/idinvert_pytorch | 19999e9945aef4843a464930426a565256863ded | [
"MIT"
]
| 52 | 2020-04-03T04:13:57.000Z | 2021-11-23T16:52:31.000Z | utils/editor.py | tien1504/idinvert_pytorch | 19999e9945aef4843a464930426a565256863ded | [
"MIT"
]
| 68 | 2020-04-03T10:08:30.000Z | 2021-10-29T20:13:45.000Z | # python 3.7
"""Utility functions for image editing from latent space."""
import os.path
import numpy as np
__all__ = [
'parse_indices', 'interpolate', 'mix_style',
'get_layerwise_manipulation_strength', 'manipulate', 'parse_boundary_list'
]
def parse_indices(obj, min_val=None, max_val=None):
"""Parses indices.
If the input is a list or tuple, this function has no effect.
  The input can also be a string, which is either a comma-separated list of
  numbers 'a, b, c', or a dash-separated range 'a - c'. Spaces in the string will
  be ignored.
Args:
obj: The input object to parse indices from.
min_val: If not `None`, this function will check that all indices are equal
to or larger than this value. (default: None)
max_val: If not `None`, this function will check that all indices are equal
to or smaller than this field. (default: None)
Returns:
A list of integers.
Raises:
If the input is invalid, i.e., neither a list or tuple, nor a string.
"""
if obj is None or obj == '':
indices = []
elif isinstance(obj, int):
indices = [obj]
elif isinstance(obj, (list, tuple, np.ndarray)):
indices = list(obj)
elif isinstance(obj, str):
indices = []
splits = obj.replace(' ', '').split(',')
for split in splits:
numbers = list(map(int, split.split('-')))
if len(numbers) == 1:
indices.append(numbers[0])
elif len(numbers) == 2:
indices.extend(list(range(numbers[0], numbers[1] + 1)))
else:
raise ValueError(f'Invalid type of input: {type(obj)}!')
assert isinstance(indices, list)
indices = sorted(list(set(indices)))
for idx in indices:
assert isinstance(idx, int)
if min_val is not None:
assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
if max_val is not None:
assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'
return indices
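# A quick illustration of the accepted `parse_indices` inputs (the values here
# are made up purely for demonstration):
#
#   parse_indices('0, 2, 5')     ->  [0, 2, 5]
#   parse_indices('3-6')         ->  [3, 4, 5, 6]
#   parse_indices([5, 1, 1, 3])  ->  [1, 3, 5]
#   parse_indices(None)          ->  []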
def interpolate(src_codes, dst_codes, step=5):
"""Interpolates two sets of latent codes linearly.
Args:
src_codes: Source codes, with shape [num, *code_shape].
dst_codes: Target codes, with shape [num, *code_shape].
    step: Number of interpolation steps, with source and target included. For
example, if `step = 5`, three more samples will be inserted. (default: 5)
Returns:
Interpolated codes, with shape [num, step, *code_shape].
Raises:
ValueError: If the input two sets of latent codes are with different shapes.
"""
if not (src_codes.ndim >= 2 and src_codes.shape == dst_codes.shape):
raise ValueError(f'Shapes of source codes and target codes should both be '
f'[num, *code_shape], but {src_codes.shape} and '
f'{dst_codes.shape} are received!')
num = src_codes.shape[0]
code_shape = src_codes.shape[1:]
a = src_codes[:, np.newaxis]
b = dst_codes[:, np.newaxis]
l = np.linspace(0.0, 1.0, step).reshape(
[step if axis == 1 else 1 for axis in range(a.ndim)])
results = a + l * (b - a)
assert results.shape == (num, step, *code_shape)
return results
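# Shape-only usage sketch for `interpolate` (the inputs are arbitrary
# placeholders, shown just to illustrate the output shape):
#
#   src = np.zeros((4, 512))
#   dst = np.ones((4, 512))
#   interpolate(src, dst, step=5).shape  ->  (4, 5, 512)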
def mix_style(style_codes,
content_codes,
num_layers=1,
mix_layers=None,
is_style_layerwise=True,
is_content_layerwise=True):
"""Mixes styles from style codes to those of content codes.
Each style code or content code consists of `num_layers` codes, each of which
is typically fed into a particular layer of the generator. This function mixes
  styles by partially replacing the codes of `content_codes` at certain layers
  with those of `style_codes`.
  For example, suppose both the style code and the content code have shape
  [10, 512], meaning there are 10 layers and each employs a 512-dimensional
  latent code, and the 1st, 2nd, and 3rd layers are the target layers for style
  mixing. Then the content codes of the first three layers (with shape [3, 512])
  will be replaced by the corresponding style codes (also with shape [3, 512]).
NOTE: This function also supports taking single-layer latent codes as inputs,
i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
case, the corresponding code will be first repeated for `num_layers` before
performing style mixing.
Args:
style_codes: Style codes, with shape [num_styles, *code_shape] or
[num_styles, num_layers, *code_shape].
content_codes: Content codes, with shape [num_contents, *code_shape] or
[num_contents, num_layers, *code_shape].
num_layers: Total number of layers in the generative model. (default: 1)
mix_layers: Indices of the layers to perform style mixing. `None` means to
replace all layers, in which case the content code will be completely
replaced by style code. (default: None)
is_style_layerwise: Indicating whether the input `style_codes` are
layer-wise codes. (default: True)
is_content_layerwise: Indicating whether the input `content_codes` are
layer-wise codes. (default: True)
Returns:
Codes after style mixing, with shape [num_styles, num_contents, num_layers,
*code_shape].
Raises:
ValueError: If input `content_codes` or `style_codes` is with invalid shape.
"""
if not is_style_layerwise:
style_codes = style_codes[:, np.newaxis]
style_codes = np.tile(
style_codes,
[num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
if not is_content_layerwise:
content_codes = content_codes[:, np.newaxis]
content_codes = np.tile(
content_codes,
[num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])
if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
style_codes.shape[1:] == content_codes.shape[1:]):
raise ValueError(f'Shapes of style codes and content codes should be '
f'[num_styles, num_layers, *code_shape] and '
f'[num_contents, num_layers, *code_shape] respectively, '
f'but {style_codes.shape} and {content_codes.shape} are '
f'received!')
layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
num_styles = style_codes.shape[0]
num_contents = content_codes.shape[0]
code_shape = content_codes.shape[2:]
s = style_codes[:, np.newaxis]
s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
c = content_codes[np.newaxis]
c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])
from_style = np.zeros(s.shape, dtype=bool)
from_style[:, :, layer_indices] = True
results = np.where(from_style, s, c)
assert results.shape == (num_styles, num_contents, num_layers, *code_shape)
return results
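# Illustrative usage sketch (not part of the original module). The layer count,
# code dimension and mixed layer indices below are assumptions for demonstration.
def _example_mix_style_usage():
  """Minimal sketch: mix the first 3 of 14 layers of random layer-wise codes."""
  style_codes = np.random.randn(2, 14, 512)    # 2 styles, 14 layers, 512-dim codes
  content_codes = np.random.randn(5, 14, 512)  # 5 contents with the same layout
  mixed = mix_style(style_codes=style_codes,
                    content_codes=content_codes,
                    num_layers=14,
                    mix_layers=[0, 1, 2])
  assert mixed.shape == (2, 5, 14, 512)
  return mixed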
def get_layerwise_manipulation_strength(num_layers,
truncation_psi,
truncation_layers):
"""Gets layer-wise strength for manipulation.
Recall the truncation trick played on layer [0, truncation_layers):
w = truncation_psi * w + (1 - truncation_psi) * w_avg
So, when using the same boundary to manipulate different layers, layer
[0, truncation_layers) and layer [truncation_layers, num_layers) should use
different strength to eliminate the effect from the truncation trick. More
concretely, the strength for layer [0, truncation_layers) is set as
  `truncation_psi`, while that for the remaining layers is set to 1.
"""
strength = [1.0 for _ in range(num_layers)]
if truncation_layers > 0:
for layer_idx in range(0, truncation_layers):
strength[layer_idx] = truncation_psi
return strength
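# Illustrative sketch (not part of the original module). The psi value and layer
# counts below are assumptions chosen only to show the returned strength pattern.
def _example_layerwise_strength():
  """Minimal sketch: per-layer strength with truncation on the first 8 of 14 layers."""
  strength = get_layerwise_manipulation_strength(num_layers=14,
                                                 truncation_psi=0.7,
                                                 truncation_layers=8)
  # The first 8 entries equal 0.7, the remaining 6 stay at 1.0.
  assert strength[:8] == [0.7] * 8 and strength[8:] == [1.0] * 6
  return strength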
def manipulate(latent_codes,
boundary,
start_distance=-5.0,
end_distance=5.0,
step=21,
layerwise_manipulation=False,
num_layers=1,
manipulate_layers=None,
is_code_layerwise=False,
is_boundary_layerwise=False,
layerwise_manipulation_strength=1.0):
"""Manipulates the given latent codes with respect to a particular boundary.
Basically, this function takes a set of latent codes and a boundary as inputs,
and outputs a collection of manipulated latent codes.
  For example, let `step` be 10, `latent_codes` have shape [num, *code_shape],
  and `boundary` have shape [1, *code_shape] with unit norm. Then the output will
  have shape [num, 10, *code_shape]. Within each group of 10 manipulated codes,
  the first code is `start_distance` away from the original code (i.e., the
  input) along the `boundary` direction, while the last code is `end_distance`
  away. The remaining codes are linearly interpolated. Here, the distance is
  sign sensitive.
NOTE: This function also supports layer-wise manipulation, in which case the
generator should be able to take layer-wise latent codes as inputs. For
  example, if the generator has 18 convolutional layers in total, each of which
  takes an independent latent code as input, it is possible, sometimes with even
  better performance, to manipulate only the latent codes corresponding to
  certain layers while keeping the others untouched.
NOTE: Boundary is assumed to be normalized to unit norm already.
Args:
latent_codes: The input latent codes for manipulation, with shape
[num, *code_shape] or [num, num_layers, *code_shape].
boundary: The semantic boundary as reference, with shape [1, *code_shape] or
[1, num_layers, *code_shape].
start_distance: Start point for manipulation. (default: -5.0)
end_distance: End point for manipulation. (default: 5.0)
step: Number of manipulation steps. (default: 21)
layerwise_manipulation: Whether to perform layer-wise manipulation.
(default: False)
num_layers: Number of layers. Only active when `layerwise_manipulation` is
set as `True`. Should be a positive integer. (default: 1)
manipulate_layers: Indices of the layers to perform manipulation. `None`
means to manipulate latent codes from all layers. (default: None)
is_code_layerwise: Whether the input latent codes are layer-wise. If set as
`False`, the function will first repeat the input codes for `num_layers`
      times before performing manipulation. (default: False)
is_boundary_layerwise: Whether the input boundary is layer-wise. If set as
`False`, the function will first repeat boundary for `num_layers` times
      before performing manipulation. (default: False)
layerwise_manipulation_strength: Manipulation strength for each layer. Only
active when `layerwise_manipulation` is set as `True`. This field can be
used to resolve the strength discrepancy across layers when truncation
trick is on. See function `get_layerwise_manipulation_strength()` for
details. A tuple, list, or `numpy.ndarray` is expected. If set as a single
number, this strength will be used for all layers. (default: 1.0)
Returns:
Manipulated codes, with shape [num, step, *code_shape] if
`layerwise_manipulation` is set as `False`, or shape [num, step,
num_layers, *code_shape] if `layerwise_manipulation` is set as `True`.
Raises:
ValueError: If the input latent codes, boundary, or strength are with
invalid shape.
"""
if not (boundary.ndim >= 2 and boundary.shape[0] == 1):
raise ValueError(f'Boundary should be with shape [1, *code_shape] or '
f'[1, num_layers, *code_shape], but '
f'{boundary.shape} is received!')
if not layerwise_manipulation:
assert not is_code_layerwise
assert not is_boundary_layerwise
num_layers = 1
manipulate_layers = None
layerwise_manipulation_strength = 1.0
# Preprocessing for layer-wise manipulation.
# Parse indices of manipulation layers.
layer_indices = parse_indices(
manipulate_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
# Make latent codes layer-wise if needed.
assert num_layers > 0
if not is_code_layerwise:
x = latent_codes[:, np.newaxis]
x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
else:
x = latent_codes
if x.shape[1] != num_layers:
raise ValueError(f'Latent codes should be with shape [num, num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {x.shape} is received!')
# Make boundary layer-wise if needed.
if not is_boundary_layerwise:
b = boundary
b = np.tile(b, [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
else:
b = boundary[0]
if b.shape[0] != num_layers:
raise ValueError(f'Boundary should be with shape [num_layers, '
f'*code_shape], where `num_layers` equals to '
f'{num_layers}, but {b.shape} is received!')
# Get layer-wise manipulation strength.
if isinstance(layerwise_manipulation_strength, (int, float)):
s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
elif isinstance(layerwise_manipulation_strength, (list, tuple)):
s = layerwise_manipulation_strength
if len(s) != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
f'mismatches number of layers `{num_layers}`!')
elif isinstance(layerwise_manipulation_strength, np.ndarray):
s = layerwise_manipulation_strength
if s.size != num_layers:
raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
f'mismatches number of layers `{num_layers}`!')
else:
raise ValueError(f'Unsupported type of `layerwise_manipulation_strength`!')
s = np.array(s).reshape(
[num_layers if axis == 0 else 1 for axis in range(b.ndim)])
b = b * s
if x.shape[1:] != b.shape:
raise ValueError(f'Latent code shape {x.shape} and boundary shape '
f'{b.shape} mismatch!')
num = x.shape[0]
code_shape = x.shape[2:]
x = x[:, np.newaxis]
b = b[np.newaxis, np.newaxis, :]
l = np.linspace(start_distance, end_distance, step).reshape(
[step if axis == 1 else 1 for axis in range(x.ndim)])
results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
is_manipulatable = np.zeros(results.shape, dtype=bool)
is_manipulatable[:, :, layer_indices] = True
results = np.where(is_manipulatable, x + l * b, results)
assert results.shape == (num, step, num_layers, *code_shape)
return results if layerwise_manipulation else results[:, :, 0]
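# Illustrative usage sketch (not part of the original module). The random boundary
# below is only a stand-in for a real, pre-trained semantic boundary.
def _example_manipulate_usage():
  """Minimal sketch: move 3 latent codes along a unit-norm boundary in 11 steps."""
  latent_codes = np.random.randn(3, 512)
  boundary = np.random.randn(1, 512)
  boundary /= np.linalg.norm(boundary)  # the function assumes a unit-norm boundary
  outputs = manipulate(latent_codes,
                       boundary,
                       start_distance=-3.0,
                       end_distance=3.0,
                       step=11)
  assert outputs.shape == (3, 11, 512)
  return outputs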
def parse_boundary_list(boundary_list_path):
"""Parses boundary list.
  Sometimes, a text file containing a list of boundaries can significantly
  simplify image manipulation with a large number of boundaries. This function
  is used to parse boundary information from such a list file.
  Basically, each item in the list should have the format
`($NAME, $SPACE_TYPE): $PATH`. `DISABLE` at the beginning of the line can
disable a particular boundary.
Sample:
(age, z): $AGE_BOUNDARY_PATH
(gender, w): $GENDER_BOUNDARY_PATH
DISABLE(pose, wp): $POSE_BOUNDARY_PATH
Args:
boundary_list_path: Path to the boundary list.
Returns:
A dictionary, whose key is a two-element tuple (boundary_name, space_type)
and value is the corresponding boundary path.
  Raises:
ValueError: If the given boundary list does not exist.
"""
if not os.path.isfile(boundary_list_path):
    raise ValueError(f'Boundary list `{boundary_list_path}` does not exist!')
boundaries = {}
with open(boundary_list_path, 'r') as f:
for line in f:
if line[:len('DISABLE')] == 'DISABLE':
continue
boundary_info, boundary_path = line.strip().split(':')
boundary_name, space_type = boundary_info.strip()[1:-1].split(',')
boundary_name = boundary_name.strip()
space_type = space_type.strip().lower()
boundary_path = boundary_path.strip()
boundaries[(boundary_name, space_type)] = boundary_path
return boundaries
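# Illustrative sketch (not part of the original module). The file name and boundary
# paths below are placeholders that only demonstrate the expected list format.
def _example_parse_boundary_list():
  """Minimal sketch: write a tiny boundary list and parse it back."""
  with open('boundary_list_example.txt', 'w') as f:
    f.write('(age, z): boundaries/age_boundary.npy\n')
    f.write('DISABLE(pose, wp): boundaries/pose_boundary.npy\n')
  boundaries = parse_boundary_list('boundary_list_example.txt')
  # Only the enabled entry is kept, keyed by (boundary_name, space_type).
  assert boundaries == {('age', 'z'): 'boundaries/age_boundary.npy'}
  return boundaries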
| 41.259542 | 80 | 0.687882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,509 | 0.586432 |
403d7ac07f1f092095ae4d7caf15898f47658901 | 96 | py | Python | venv/lib/python3.8/site-packages/pyls/_version.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
]
| 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pyls/_version.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pyls/_version.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| null | null | null | /home/runner/.cache/pip/pool/24/e8/39/183700a0b2d2a9545f3da2571d82b53df290aab3a51dc229b113d16e6c | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
403e17c5ec985065a02c6baa32d0dcd4699f18d1 | 1,277 | py | Python | pymoo/util/normalization.py | Electr0phile/pymoo | 652428473cc68b6d9deada3792635bc8a831b255 | [
"Apache-2.0"
]
| 1 | 2020-08-27T09:51:27.000Z | 2020-08-27T09:51:27.000Z | pymoo/util/normalization.py | Asurada2015/pymoo | 023a787d0b78813e789f170a3e94b2de85605aff | [
"Apache-2.0"
]
| null | null | null | pymoo/util/normalization.py | Asurada2015/pymoo | 023a787d0b78813e789f170a3e94b2de85605aff | [
"Apache-2.0"
]
| null | null | null | import numpy as np
def denormalize(x, x_min, x_max):
if x_max is None:
_range = 1
else:
_range = (x_max - x_min)
return x * _range + x_min
def normalize(x, x_min=None, x_max=None, return_bounds=False, estimate_bounds_if_none=True):
# if the bounds should be estimated if none do it for both
if estimate_bounds_if_none and x_min is None:
x_min = np.min(x, axis=0)
if estimate_bounds_if_none and x_max is None:
x_max = np.max(x, axis=0)
# if they are still none set them to default to avoid exception
if x_min is None:
        x_min = np.zeros(x.shape[-1])
    if x_max is None:
        x_max = np.ones(x.shape[-1])
# calculate the denominator
denom = x_max - x_min
# we can not divide by zero -> plus small epsilon
denom += 1e-30
# normalize the actual values
N = (x - x_min) / denom
# return with or without bounds
if not return_bounds:
return N
else:
return N, x_min, x_max
def standardize(x, return_bounds=False):
mean = np.mean(x, axis=0)
std = np.std(x, axis=0)
# standardize
val = (x - mean) / std
if not return_bounds:
return val
else:
return val, mean, std
def destandardize(x, mean, std):
return (x * std) + mean
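# Illustrative usage sketch (not part of the original module): round-trip a small
# matrix through normalize()/denormalize(). The sample values are assumptions.
def _example_normalization_roundtrip():
    """Minimal sketch of column-wise normalization followed by denormalization."""
    x = np.array([[1.0, 10.0], [3.0, 30.0], [5.0, 50.0]])
    n, x_min, x_max = normalize(x, return_bounds=True)
    # Each column is mapped to [0, 1] using its own estimated bounds.
    restored = denormalize(n, x_min, x_max)
    assert np.allclose(restored, x)
    return n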
| 21.644068 | 92 | 0.617071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.211433 |
4040a877bb3e28b9851ff90970e6bf5e768e303c | 31,211 | py | Python | alembic/versions/92235b77ea53_check_new.py | go-lab/appcomposer | c2468f11b8398edc9b16e1552ac8d609d8347677 | [
"BSD-2-Clause"
]
| 1 | 2018-01-20T14:56:01.000Z | 2018-01-20T14:56:01.000Z | alembic/versions/92235b77ea53_check_new.py | go-lab/appcomposer | c2468f11b8398edc9b16e1552ac8d609d8347677 | [
"BSD-2-Clause"
]
| 25 | 2015-01-21T09:16:26.000Z | 2021-12-13T20:01:21.000Z | alembic/versions/92235b77ea53_check_new.py | go-lab/appcomposer | c2468f11b8398edc9b16e1552ac8d609d8347677 | [
"BSD-2-Clause"
]
| 3 | 2015-07-28T18:40:05.000Z | 2017-03-28T08:14:37.000Z | """Check new
Revision ID: 92235b77ea53
Revises: 381fdb66ec27
Create Date: 2017-10-14 02:38:51.007307
"""
# revision identifiers, used by Alembic.
revision = '92235b77ea53'
down_revision = '381fdb66ec27'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_ActiveTranslationMessages_category', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_datetime', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_fmt', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_from_developer', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_key', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_namespace', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_position', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_same_tool', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_taken_from_default', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_tool_id', table_name='ActiveTranslationMessages')
op.drop_index('ix_Apps_composer', table_name='Apps')
op.drop_index('ix_Apps_creation_date', table_name='Apps')
op.drop_index('ix_Apps_last_access_date', table_name='Apps')
op.drop_index('ix_Apps_modification_date', table_name='Apps')
op.drop_index('ix_Apps_name', table_name='Apps')
op.drop_index('ix_Apps_owner_id', table_name='Apps')
op.drop_index('ix_Apps_unique_id', table_name='Apps')
op.drop_index('ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')
op.drop_index('ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')
op.drop_index('ix_Languages_language', table_name='Languages')
op.drop_index('ix_RepositoryApps_adaptable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_external_id', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing_since', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_check', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_download_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_time', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_name', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_repository', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_translatable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_url', table_name='RepositoryApps')
op.drop_index('ix_TranslatedApps_url', table_name='TranslatedApps')
op.drop_index('ix_TranslationBundles_from_developer', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_language', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_target', table_name='TranslationBundles')
op.drop_index('ix_TranslationCurrentActiveUsers_last_check', table_name='TranslationCurrentActiveUsers')
op.drop_index('ix_TranslationExternalSuggestions_engine', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key_hash', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_origin_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationKeySuggestions_key', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_language', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_target', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationMessageHistory_category', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_datetime', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_fmt', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_from_developer', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_key', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_namespace', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_parent_translation_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_position', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_same_tool', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_taken_from_default', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_tool_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationNotificationRecipients_created', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationNotificationRecipients_email', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationSubscriptions_last_check', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSubscriptions_mechanism', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSyncLogs_end_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationSyncLogs_start_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationUrls_automatic', table_name='TranslationUrls')
op.drop_index('ix_TranslationUrls_url', table_name='TranslationUrls')
op.drop_index('ix_TranslationValueSuggestions_human_key', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_language', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_target', table_name='TranslationValueSuggestions')
op.drop_index('ix_Users_creation_date', table_name='Users')
op.drop_index('ix_Users_last_access_date', table_name='Users')
op.create_index(op.f('ix_ActiveTranslationMessages_category'), 'ActiveTranslationMessages', ['category'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_datetime'), 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_fmt'), 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_from_developer'), 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_key'), 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_namespace'), 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_position'), 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_same_tool'), 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_tool_id'), 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index(op.f('ix_Apps_composer'), 'Apps', ['composer'], unique=False)
op.create_index(op.f('ix_Apps_creation_date'), 'Apps', ['creation_date'], unique=False)
op.create_index(op.f('ix_Apps_last_access_date'), 'Apps', ['last_access_date'], unique=False)
op.create_index(op.f('ix_Apps_modification_date'), 'Apps', ['modification_date'], unique=False)
op.create_index(op.f('ix_Apps_name'), 'Apps', ['name'], unique=False)
op.create_index(op.f('ix_Apps_owner_id'), 'Apps', ['owner_id'], unique=False)
op.create_index(op.f('ix_Apps_unique_id'), 'Apps', ['unique_id'], unique=True)
op.create_index(op.f('ix_GoLabOAuthUsers_display_name'), 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index(op.f('ix_GoLabOAuthUsers_email'), 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index(op.f('ix_Languages_language'), 'Languages', ['language'], unique=True)
op.create_index(op.f('ix_RepositoryApps_adaptable'), 'RepositoryApps', ['adaptable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_contents_hash'), 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_downloaded_hash'), 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_external_id'), 'RepositoryApps', ['external_id'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing_since'), 'RepositoryApps', ['failing_since'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing'), 'RepositoryApps', ['failing'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_change'), 'RepositoryApps', ['last_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_check'), 'RepositoryApps', ['last_check'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_download_change'), 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_time'), 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index(op.f('ix_RepositoryApps_name'), 'RepositoryApps', ['name'], unique=False)
op.create_index(op.f('ix_RepositoryApps_repository'), 'RepositoryApps', ['repository'], unique=False)
op.create_index(op.f('ix_RepositoryApps_translatable'), 'RepositoryApps', ['translatable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_url'), 'RepositoryApps', ['url'], unique=False)
op.create_index(op.f('ix_TranslatedApps_url'), 'TranslatedApps', ['url'], unique=True)
op.create_index(op.f('ix_TranslationBundles_from_developer'), 'TranslationBundles', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationBundles_language'), 'TranslationBundles', ['language'], unique=False)
op.create_index(op.f('ix_TranslationBundles_target'), 'TranslationBundles', ['target'], unique=False)
op.create_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_engine'), 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key'), 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_language'), 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_origin_language'), 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_key'), 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_language'), 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_target'), 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_category'), 'TranslationMessageHistory', ['category'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_datetime'), 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_fmt'), 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_from_developer'), 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_key'), 'TranslationMessageHistory', ['key'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_namespace'), 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_position'), 'TranslationMessageHistory', ['position'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_same_tool'), 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_taken_from_default'), 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_tool_id'), 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_created'), 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_email'), 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index(op.f('ix_TranslationSubscriptions_last_check'), 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationSubscriptions_mechanism'), 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_end_datetime'), 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_start_datetime'), 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index(op.f('ix_TranslationUrls_automatic'), 'TranslationUrls', ['automatic'], unique=False)
op.create_index(op.f('ix_TranslationUrls_url'), 'TranslationUrls', ['url'], unique=True)
op.create_index(op.f('ix_TranslationValueSuggestions_human_key'), 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_language'), 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_target'), 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index(op.f('ix_Users_creation_date'), 'Users', ['creation_date'], unique=False)
op.create_index(op.f('ix_Users_last_access_date'), 'Users', ['last_access_date'], unique=False)
# op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key'])
# op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id'])
# op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_Users_last_access_date'), table_name='Users')
op.drop_index(op.f('ix_Users_creation_date'), table_name='Users')
op.drop_index(op.f('ix_TranslationValueSuggestions_target'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_language'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_human_key'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationUrls_url'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationUrls_automatic'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationSyncLogs_start_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSyncLogs_end_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSubscriptions_mechanism'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationSubscriptions_last_check'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationNotificationRecipients_email'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationNotificationRecipients_created'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationMessageHistory_tool_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_taken_from_default'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_same_tool'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_position'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_namespace'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_key'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_from_developer'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_fmt'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_datetime'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_category'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationKeySuggestions_target'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_language'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_key'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_origin_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_engine'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationBundles_target'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_language'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_from_developer'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), table_name='TranslationCurrentActiveUsers')
# op.drop_constraint(None, 'TranslationBundles', type_='unique')
op.drop_index(op.f('ix_RepositoryApps_url'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_translatable'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_repository'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_name'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_time'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_download_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_check'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing_since'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_external_id'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_adaptable'), table_name='RepositoryApps')
# op.drop_constraint(None, 'RepositoryApp2languages', type_='unique')
op.drop_index(op.f('ix_TranslatedApps_url'), table_name='TranslatedApps')
op.drop_index(op.f('ix_Languages_language'), table_name='Languages')
op.drop_index(op.f('ix_GoLabOAuthUsers_email'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_GoLabOAuthUsers_display_name'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_Apps_unique_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_owner_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_name'), table_name='Apps')
op.drop_index(op.f('ix_Apps_modification_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_last_access_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_creation_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_composer'), table_name='Apps')
# op.drop_constraint(None, 'ActiveTranslationMessages', type_='unique')
op.drop_index(op.f('ix_ActiveTranslationMessages_tool_id'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_same_tool'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_position'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_namespace'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_key'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_from_developer'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_fmt'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_datetime'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_category'), table_name='ActiveTranslationMessages')
op.create_index('ix_Users_last_access_date', 'Users', ['last_access_date'], unique=False)
op.create_index('ix_Users_creation_date', 'Users', ['creation_date'], unique=False)
op.create_index('ix_TranslationValueSuggestions_target', 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index('ix_TranslationValueSuggestions_language', 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationValueSuggestions_human_key', 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationUrls_url', 'TranslationUrls', ['url'], unique=True)
op.create_index('ix_TranslationUrls_automatic', 'TranslationUrls', ['automatic'], unique=False)
op.create_index('ix_TranslationSyncLogs_start_datetime', 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index('ix_TranslationSyncLogs_end_datetime', 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index('ix_TranslationSubscriptions_mechanism', 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index('ix_TranslationSubscriptions_last_check', 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index('ix_TranslationNotificationRecipients_email', 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index('ix_TranslationNotificationRecipients_created', 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index('ix_TranslationMessageHistory_tool_id', 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_taken_from_default', 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index('ix_TranslationMessageHistory_same_tool', 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index('ix_TranslationMessageHistory_position', 'TranslationMessageHistory', ['position'], unique=False)
op.create_index('ix_TranslationMessageHistory_parent_translation_id', 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_namespace', 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index('ix_TranslationMessageHistory_key', 'TranslationMessageHistory', ['key'], unique=False)
op.create_index('ix_TranslationMessageHistory_from_developer', 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index('ix_TranslationMessageHistory_fmt', 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index('ix_TranslationMessageHistory_datetime', 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index('ix_TranslationMessageHistory_category', 'TranslationMessageHistory', ['category'], unique=False)
op.create_index('ix_TranslationKeySuggestions_target', 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index('ix_TranslationKeySuggestions_language', 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index('ix_TranslationKeySuggestions_key', 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_origin_language', 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_language', 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key_hash', 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key', 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_engine', 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index('ix_TranslationCurrentActiveUsers_last_check', 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index('ix_TranslationBundles_target', 'TranslationBundles', ['target'], unique=False)
op.create_index('ix_TranslationBundles_language', 'TranslationBundles', ['language'], unique=False)
op.create_index('ix_TranslationBundles_from_developer', 'TranslationBundles', ['from_developer'], unique=False)
op.create_index('ix_TranslatedApps_url', 'TranslatedApps', ['url'], unique=True)
op.create_index('ix_RepositoryApps_url', 'RepositoryApps', ['url'], unique=False)
op.create_index('ix_RepositoryApps_translatable', 'RepositoryApps', ['translatable'], unique=False)
op.create_index('ix_RepositoryApps_repository', 'RepositoryApps', ['repository'], unique=False)
op.create_index('ix_RepositoryApps_name', 'RepositoryApps', ['name'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_time', 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_downloaded_hash', 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_contents_hash', 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_download_change', 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index('ix_RepositoryApps_last_check', 'RepositoryApps', ['last_check'], unique=False)
op.create_index('ix_RepositoryApps_last_change', 'RepositoryApps', ['last_change'], unique=False)
op.create_index('ix_RepositoryApps_failing_since', 'RepositoryApps', ['failing_since'], unique=False)
op.create_index('ix_RepositoryApps_failing', 'RepositoryApps', ['failing'], unique=False)
op.create_index('ix_RepositoryApps_external_id', 'RepositoryApps', ['external_id'], unique=False)
op.create_index('ix_RepositoryApps_downloaded_hash', 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_contents_hash', 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_adaptable', 'RepositoryApps', ['adaptable'], unique=False)
op.create_index('ix_Languages_language', 'Languages', ['language'], unique=True)
op.create_index('ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index('ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index('ix_Apps_unique_id', 'Apps', ['unique_id'], unique=True)
op.create_index('ix_Apps_owner_id', 'Apps', ['owner_id'], unique=False)
op.create_index('ix_Apps_name', 'Apps', ['name'], unique=False)
op.create_index('ix_Apps_modification_date', 'Apps', ['modification_date'], unique=False)
op.create_index('ix_Apps_last_access_date', 'Apps', ['last_access_date'], unique=False)
op.create_index('ix_Apps_creation_date', 'Apps', ['creation_date'], unique=False)
op.create_index('ix_Apps_composer', 'Apps', ['composer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_tool_id', 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index('ix_ActiveTranslationMessages_taken_from_default', 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index('ix_ActiveTranslationMessages_same_tool', 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index('ix_ActiveTranslationMessages_position', 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index('ix_ActiveTranslationMessages_namespace', 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index('ix_ActiveTranslationMessages_key', 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index('ix_ActiveTranslationMessages_from_developer', 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_fmt', 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index('ix_ActiveTranslationMessages_datetime', 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index('ix_ActiveTranslationMessages_category', 'ActiveTranslationMessages', ['category'], unique=False)
# ### end Alembic commands ###
| 90.205202 | 149 | 0.79834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19,232 | 0.616193 |
4040e2297e78d48d586c2e4b34ffa775eb46c92e | 5,633 | py | Python | build/lib/adb_utils/adb_utils.py | christopherferreira3/Python-ADB-Tools | 94e39cfe4b285517ee2502f658ab23af4ff18643 | [
"MIT"
]
| null | null | null | build/lib/adb_utils/adb_utils.py | christopherferreira3/Python-ADB-Tools | 94e39cfe4b285517ee2502f658ab23af4ff18643 | [
"MIT"
]
| null | null | null | build/lib/adb_utils/adb_utils.py | christopherferreira3/Python-ADB-Tools | 94e39cfe4b285517ee2502f658ab23af4ff18643 | [
"MIT"
]
| null | null | null | import subprocess
import os
def get_connected_devices() -> list:
"""
Returns a list of tuples containing the Device name and the android Version
:return:
"""
devices = []
devices_output = subprocess.check_output(["adb", "devices"]).decode("utf-8").strip("List of devices attached").split("\n")
for device in devices_output:
if device is None or device == "":
pass
else:
device_name = device.strip('\tdevice')
android_version = subprocess.check_output(["adb", "-s", device_name, "shell", "getprop", "ro.build.version.release"])
devices.append((device_name, android_version.decode('utf-8').strip("\r\n")))
return devices
def install_app(apk_path=None, device=None) -> bool:
"""
Installs an APK file into a device.
The app installed with the -r option so the apk gets replaced it exists or installed if it doenst
:param apk_path: Path for the APK
:param device: Device name
:return: True if success , False if fail
"""
path = os.getcwd() + apk_path if str(apk_path).startswith("/") else os.getcwd() + "/" + apk_path
if apk_path is not None and device is not None:
if os.path.isfile(path):
command = ["adb", "-s" , device, "install", "-r", path]
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
print("APK {0} was installed in {1}".format(apk_path, device))
return True
else:
print("File {0} not found!".format(path))
else:
print("Device and/or apk not found or not specified")
return False
def is_device_connected(device) -> bool:
all_connected = get_connected_devices()
for device_connected, version in all_connected:
if device == device_connected:
return True
return False
def unintall_app(package=None, device=None) -> None:
"""
Uninstall an app from the device
:return:
"""
command = ["adb", "-s", device, "uninstall", package]
if package is not None:
if device is None:
command.pop(1)
command.pop(1)
p = subprocess.Popen(command, stdout=None)
p.wait()
p.terminate()
else:
print("App package was not specified.")
def is_app_installed(package=None, device=None) -> bool:
"""
Returns True if the package is installed or False if it is not
:param package:
:return:
"""
command = ["adb", "-s", device, "shell", "pm", "list", "packages |", "grep", package]
if device is None:
command.pop(1)
command.pop(1)
out = subprocess.check_output(command, stderr=None)
return True if out.decode('utf-8').strip("\r\n") == "package:{0}".format(package) else False
def run_command(arg_string=None, arg_list=None) -> None:
"""
    Run a general ADB command
:return:
"""
command = arg_list if arg_list else str(arg_string).split(" ")
p = subprocess.check_output(command, stderr=None)
print(p.decode('utf-8'))
def kill_server() -> None:
"""
Kills the ADB server
:return: None
"""
command = ["adb", "kill-server"]
p = subprocess.Popen(command, stdout=None, stderr=None)
p.wait(timeout=10)
print("ADB server has been killed.")
def start_server() -> None:
"""
Starts the ADB server
:return: None
"""
command = ["adb", "start-server"]
p = subprocess.Popen(command, stderr=None, stdout=None)
p.wait(timeout=10)
print("ADB server has been started.")
def get_apk_from_device(package=None, device=None) -> bool:
"""
Retrieves the APK of an application if it exists
:param package:
:param device:
:return: bool
"""
# adb shell pm path com.example.someapp
# adb pull /data/app/com.example.someapp-2.apk path/to/desired/destination
command_apk_path = ["adb", "-s", device, "pm", "path", package]
if package is None:
print("Package is required but it was not specified.")
return False
if device is None and len(get_connected_devices()) != 1:
print("There are multiple devices connected, please specify a device to get the APK from")
return False
elif device is None:
command_apk_path.pop(1)
command_apk_path.pop(1)
apk_path = subprocess.check_output(command_apk_path, stderr=None)
# TODO: Rest of the stuff
def push_file_to_device() -> None: # For now...
"""
Pushes a file to the device
:param device:
:return: None
"""
pass
def list_files_in_device() -> None:
"""
Gets a list of files in a specific folder
:param device:
:param path:
:return: list of files
"""
pass
def unlock_device(password=None, device=None) -> bool:
"""
Unlocks a device given a device name and the password
:param password:
:param device:
:return: True is sucess, False if error
"""
command_input = ["adb", "-s", device, "shell", "input", "text", password]
command_submit = ["adb", "-s", device, "shell", "input", "keyevent", 66]
if device is None and len(get_connected_devices()) != 1:
print("No device was specified and/or multiple devices are connected")
return False
if device is None:
command_input.pop(1)
command_input.pop(1)
command_submit.pop(1)
command_submit.pop(1)
p = subprocess.Popen(command_input, stdout=None)
p.wait()
p.terminate()
p1 = subprocess.Popen(command_submit, stdout=None)
p1.wait()
p1.terminate()
return True
| 27.081731 | 129 | 0.617788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,086 | 0.370318 |
4041a6092503143d16664ce5f9772df9bdedc920 | 2,664 | py | Python | tests/unit/test_cl61d.py | griesche/cloudnetpy-1 | 0675677d1cb8dc4b09dfe5d76129df4483725fce | [
"MIT"
]
| 1 | 2021-11-16T15:23:24.000Z | 2021-11-16T15:23:24.000Z | tests/unit/test_cl61d.py | griesche/cloudnetpy-1 | 0675677d1cb8dc4b09dfe5d76129df4483725fce | [
"MIT"
]
| null | null | null | tests/unit/test_cl61d.py | griesche/cloudnetpy-1 | 0675677d1cb8dc4b09dfe5d76129df4483725fce | [
"MIT"
]
| null | null | null | import glob
import os
import sys
from tempfile import TemporaryDirectory
import netCDF4
import numpy as np
import numpy.ma as ma
from all_products_fun import Check
from lidar_fun import LidarFun
from cloudnetpy import concat_lib
from cloudnetpy.instruments import ceilo2nc
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_PATH)
FILES = glob.glob(f"{SCRIPT_PATH}/data/cl61d/*.nc")
FILES.sort()
SITE_META = {
"name": "Hyytiälä",
"altitude": 123,
"calibration_factor": 2.0,
"latitude": 45.0,
"longitude": 22.0,
}
class TestCl61d(Check):
site_meta = SITE_META
date = "2021-08-29"
temp_dir = TemporaryDirectory()
daily_file = temp_dir.name + "/daily.nc"
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
temp_path = temp_dir.name + "/test.nc"
uuid = ceilo2nc(daily_file, temp_path, site_meta, date=date)
def test_variable_names(self):
keys = {
"beta",
"beta_smooth",
"calibration_factor",
"range",
"height",
"zenith_angle",
"time",
"depolarisation",
"altitude",
"latitude",
"longitude",
"wavelength",
}
assert set(self.nc.variables.keys()) == keys
def test_common_lidar(self):
lidar_fun = LidarFun(self.nc, self.site_meta, self.date, self.uuid)
for name, method in LidarFun.__dict__.items():
if "test_" in name:
getattr(lidar_fun, name)()
def test_variable_values(self):
assert abs(self.nc.variables["wavelength"][:] - 910.55) < 0.001
assert self.nc.variables["zenith_angle"][:] == 3.0
assert ma.max(self.nc.variables["depolarisation"][:]) < 1
assert ma.min(self.nc.variables["depolarisation"][:]) > -0.1
def test_comments(self):
assert "SNR threshold applied: 5" in self.nc.variables["beta"].comment
def test_global_attributes(self):
assert self.nc.source == "Vaisala CL61d"
assert self.nc.title == f'CL61d ceilometer from {self.site_meta["name"]}'
def test_date_argument(tmp_path):
daily_file = str(tmp_path / "daily.nc")
test_file = str(tmp_path / "test.nc")
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
ceilo2nc(daily_file, test_file, SITE_META, date="2021-08-30")
with netCDF4.Dataset(test_file) as nc:
assert len(nc.variables["time"]) == 12
assert np.all(np.diff(nc.variables["time"][:]) > 0)
assert nc.year == "2021"
assert nc.month == "08"
assert nc.day == "30"
| 30.62069 | 81 | 0.634384 | 1,581 | 0.593023 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.188672 |
4043eb802b57171a6cc605056ffc3abeca7f2a68 | 1,343 | py | Python | tests/functions/test_count.py | athre0z/clickhouse-sqlalchemy | d4be4a818c2fadef8eeb76a59d11ff82fc2c433a | [
"MIT"
]
| 1 | 2021-07-07T09:06:00.000Z | 2021-07-07T09:06:00.000Z | tests/functions/test_count.py | athre0z/clickhouse-sqlalchemy | d4be4a818c2fadef8eeb76a59d11ff82fc2c433a | [
"MIT"
]
| null | null | null | tests/functions/test_count.py | athre0z/clickhouse-sqlalchemy | d4be4a818c2fadef8eeb76a59d11ff82fc2c433a | [
"MIT"
]
| null | null | null | from sqlalchemy import Column, func
from clickhouse_sqlalchemy import types, Table
from tests.testcase import (
BaseAbstractTestCase, HttpSessionTestCase, NativeSessionTestCase,
)
class CountTestCaseBase(BaseAbstractTestCase):
def create_table(self):
metadata = self.metadata()
return Table(
't1', metadata,
Column('x', types.Int32, primary_key=True)
)
def test_count(self):
table = self.create_table()
self.assertEqual(
self.compile(self.session.query(func.count(table.c.x))),
'SELECT count(x) AS count_1 FROM t1'
)
def test_count_distinct(self):
table = self.create_table()
query = self.session.query(func.count(func.distinct(table.c.x)))
self.assertEqual(
self.compile(query),
'SELECT count(distinct(x)) AS count_1 FROM t1'
)
def test_count_no_column_specified(self):
table = self.create_table()
query = self.session.query(func.count()).select_from(table)
self.assertEqual(
self.compile(query),
'SELECT count(*) AS count_1 FROM t1'
)
class CountHttpTestCase(CountTestCaseBase, HttpSessionTestCase):
""" ... """
class CountNativeTestCase(CountTestCaseBase, NativeSessionTestCase):
""" ... """
| 27.408163 | 72 | 0.63589 | 1,149 | 0.855547 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.109456 |
4043f84908b97607d02cc9c6faf2b455d08e20a4 | 1,055 | py | Python | scripts/commands/html/actions/search.py | stevekineeve88/orb | 284cc78659e88e85e8773599da3bda382a8bb833 | [
"MIT"
]
| null | null | null | scripts/commands/html/actions/search.py | stevekineeve88/orb | 284cc78659e88e85e8773599da3bda382a8bb833 | [
"MIT"
]
| null | null | null | scripts/commands/html/actions/search.py | stevekineeve88/orb | 284cc78659e88e85e8773599da3bda382a8bb833 | [
"MIT"
]
| null | null | null | import click
import requests
from bs4 import BeautifulSoup
from modules.Word.managers.DictionaryManager import DictionaryManager
import re
@click.command()
@click.option('--url', help='URL to fetch from')
@click.pass_context
def search(ctx, url):
dictionary_manager: DictionaryManager = ctx.obj[DictionaryManager]
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
words_list = soup.text.split()
words_found = {}
print("Starting...")
i = 1
percentage = 5
percentage_increments = 5
for word in words_list:
try:
if (i / len(words_list) * 100) > percentage:
print(f'{percentage}% read')
percentage += percentage_increments
i += 1
word = re.sub(' +', ' ', word)
if word in words_found:
words_found[word] += 1
continue
dictionary_manager.is_word(word)
words_found[word] = 1
except Exception as e:
print(f'{str(e)}: {word}')
print("Complete...")
| 30.142857 | 70 | 0.6 | 0 | 0 | 0 | 0 | 913 | 0.865403 | 0 | 0 | 112 | 0.106161 |
404408dcaaf3ec9278595ad0836bc4bc90af7ec0 | 81 | py | Python | asf_search/constants/DATASET/__init__.py | jhkennedy/Discovery-asf_search | 4ec45e8a85cd626ea92f83937df9f8f04e0f7f4f | [
"BSD-3-Clause"
]
| null | null | null | asf_search/constants/DATASET/__init__.py | jhkennedy/Discovery-asf_search | 4ec45e8a85cd626ea92f83937df9f8f04e0f7f4f | [
"BSD-3-Clause"
]
| 1 | 2021-04-01T16:30:56.000Z | 2021-04-01T16:30:56.000Z | asf_search/constants/DATASET/__init__.py | jhkennedy/Discovery-asf_search | 4ec45e8a85cd626ea92f83937df9f8f04e0f7f4f | [
"BSD-3-Clause"
]
| null | null | null | """Datasets to be used in search and related functions"""
from .DATASET import * | 27 | 57 | 0.740741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.703704 |
404411fc8cdef43afe8b983d66104ed1efd7c616 | 16,089 | py | Python | cell2cell/plotting/cci_plot.py | ckmah/cell2cell | ce18bbb63e12f9b1da8699567dec9a2a8b78f824 | [
"BSD-3-Clause"
]
| 16 | 2020-09-30T01:53:43.000Z | 2022-03-25T09:58:54.000Z | cell2cell/plotting/cci_plot.py | ckmah/cell2cell | ce18bbb63e12f9b1da8699567dec9a2a8b78f824 | [
"BSD-3-Clause"
]
| 2 | 2021-08-09T21:26:54.000Z | 2021-11-08T14:47:39.000Z | cell2cell/plotting/cci_plot.py | ckmah/cell2cell | ce18bbb63e12f9b1da8699567dec9a2a8b78f824 | [
"BSD-3-Clause"
]
| 3 | 2021-11-08T07:47:44.000Z | 2022-03-30T18:40:00.000Z | # -*- coding: utf-8 -*-
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from cell2cell.clustering import compute_linkage
from cell2cell.preprocessing.manipulate_dataframes import check_symmetry
from cell2cell.plotting.aesthetics import map_colors_to_metadata
def clustermap_cci(interaction_space, method='ward', optimal_leaf=True, metadata=None, sample_col='#SampleID',
group_col='Groups', meta_cmap='gist_rainbow', colors=None, excluded_cells=None, title='',
cbar_title='CCI score', cbar_fontsize=18, filename=None, **kwargs):
'''Generates a clustermap (heatmap + dendrograms from a hierarchical
clustering) based on CCI scores of cell-cell pairs.
Parameters
----------
interaction_space : cell2cell.core.interaction_space.InteractionSpace
Interaction space that contains all a distance matrix after running the
the method compute_pairwise_cci_scores. Alternatively, this object
can be a numpy-array or a pandas DataFrame. Also, a
SingleCellInteractions or a BulkInteractions object after running
the method compute_pairwise_cci_scores.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_leaf : boolean, default=True
Whether sorting the leaf of the dendrograms to have a minimal distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
metadata : pandas.Dataframe, default=None
Metadata associated with the cells, cell types or samples in the
matrix containing CCI scores. If None, cells will not be colored
by major groups.
sample_col : str, default='#SampleID'
Column in the metadata for the cells, cell types or samples
in the matrix containing CCI scores.
group_col : str, default='Groups'
Column in the metadata containing the major groups of cells, cell types
or samples in the matrix with CCI scores.
meta_cmap : str, default='gist_rainbow'
Name of the color palette for coloring the major groups of cells.
colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells. If colors is specified, meta_cmap will be
ignored.
excluded_cells : list, default=None
List containing cell names that are present in the interaction_space
object but that will be excluded from this plot.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=18
Font size for the colorbar title as well as labels for axes X and Y.
filename : str, default=None
        Path to save the clustermap figure. If None, the figure is not
        saved.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if hasattr(interaction_space, 'distance_matrix'):
print('Interaction space detected as an InteractionSpace class')
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
elif (type(interaction_space) is np.ndarray) or (type(interaction_space) is pd.core.frame.DataFrame):
print('Interaction space detected as a distance matrix')
distance_matrix = interaction_space
space_type = 'matrix'
elif hasattr(interaction_space, 'interaction_space'):
print('Interaction space detected as a Interactions class')
if not hasattr(interaction_space.interaction_space, 'distance_matrix'):
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
else:
interaction_space = interaction_space.interaction_space
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
else:
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
# Drop excluded cells
if excluded_cells is not None:
df = distance_matrix.loc[~distance_matrix.index.isin(excluded_cells),
~distance_matrix.columns.isin(excluded_cells)]
else:
df = distance_matrix
# Check symmetry to get linkage
symmetric = check_symmetry(df)
if (not symmetric) & (type(interaction_space) is pd.core.frame.DataFrame):
assert set(df.index) == set(df.columns), 'The distance matrix does not have the same elements in rows and columns'
# Obtain info for generating plot
linkage = _get_distance_matrix_linkages(df=df,
kwargs=kwargs,
method=method,
optimal_ordering=optimal_leaf,
symmetric=symmetric
)
kwargs_ = kwargs.copy()
# PLOT CCI MATRIX
if space_type == 'class':
df = interaction_space.interaction_elements['cci_matrix']
else:
df = distance_matrix
if excluded_cells is not None:
df = df.loc[~df.index.isin(excluded_cells),
~df.columns.isin(excluded_cells)]
# Colors
if metadata is not None:
col_colors = map_colors_to_metadata(metadata=metadata,
ref_df=df,
colors=colors,
sample_col=sample_col,
group_col=group_col,
cmap=meta_cmap)
if not symmetric:
row_colors = col_colors
else:
row_colors = None
else:
col_colors = None
row_colors = None
# Plot hierarchical clustering (triangular)
hier = _plot_triangular_clustermap(df=df,
symmetric=symmetric,
linkage=linkage,
col_colors=col_colors,
row_colors=row_colors,
title=title,
cbar_title=cbar_title,
cbar_fontsize=cbar_fontsize,
**kwargs_)
if ~symmetric:
hier.ax_heatmap.set_xlabel('Receiver cells', fontsize=cbar_fontsize)
hier.ax_heatmap.set_ylabel('Sender cells', fontsize=cbar_fontsize)
if filename is not None:
plt.savefig(filename, dpi=300,
bbox_inches='tight')
return hier
def _get_distance_matrix_linkages(df, kwargs, method='ward', optimal_ordering=True, symmetric=None):
'''Computes linkages for the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores in a form of distances (that is, smaller
values represent stronger interactions). Diagonal must be filled
by zeros.
kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_ordering : boolean, default=True
Whether sorting the leaf of the dendrograms to have a minimal distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
symmetric : boolean, default=None
Whether df is symmetric.
Returns
-------
linkage : ndarray
The hierarchical clustering of cells encoded as a linkage matrix.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if symmetric:
if 'col_cluster' in kwargs.keys():
kwargs['row_cluster'] = kwargs['col_cluster']
if kwargs['col_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
elif 'row_cluster' in kwargs.keys():
if kwargs['row_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
else:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
return linkage
def _triangularize_distance_matrix(df, linkage=None, symmetric=None, **kwargs):
'''Generates a mask to plot the upper triangle of the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
symmetric : boolean, default=None
Whether df is symmetric.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
mask : ndarray
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros).
'''
if symmetric is None:
symmetric = check_symmetry(df)
# Triangular matrix
if symmetric:
order_map = dict()
if linkage is None:
mask = np.ones((df.shape[0], df.shape[1]))
for i in range(mask.shape[0]):
for j in range(i, mask.shape[1]):
mask[i, j] = 0
else:
# Plot hierarchical clustering for getting indexes according to linkage
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
**kwargs
)
plt.close()
ind_order = hier.dendrogram_col.reordered_ind
mask = np.zeros((df.shape[0], df.shape[1]))
for i, ind in enumerate(ind_order):
order_map[i] = ind
filter_list = [order_map[j] for j in range(i)]
mask[ind, filter_list] = 1
else:
mask = None
return mask
def _plot_triangular_clustermap(df, symmetric=None, linkage=None, mask=None, col_colors=None, row_colors=None,
title='', cbar_title='CCI score', cbar_fontsize=12, **kwargs):
'''Plots a triangular clustermap based on a mask.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
mask : ndarray, default=None
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros). If None, a mask will be computed based on the CCI matrix
symmetry.
col_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the columns.
row_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the rows.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=12
Font size for the colorbar title as well as labels for axes X and Y.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if mask is None:
mask = _triangularize_distance_matrix(df=df,
linkage=linkage,
symmetric=symmetric,
**kwargs
)
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
mask=mask,
col_colors=col_colors,
row_colors=row_colors,
**kwargs
)
hier = _move_xticks_triangular_clustermap(clustermap=hier,
symmetric=symmetric
)
# Title
if len(title) > 0:
hier.ax_col_dendrogram.set_title(title, fontsize=16)
# Color bar label
cbar = hier.ax_heatmap.collections[0].colorbar
cbar.ax.set_ylabel(cbar_title, fontsize=cbar_fontsize)
cbar.ax.yaxis.set_label_position("left")
return hier
def _move_xticks_triangular_clustermap(clustermap, symmetric=True):
'''Moves xticks to the diagonal when plotting a symmetric matrix
in the form of a upper triangle.
Parameters
---------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
symmetric : boolean, default=None
Whether the CCI matrix plotted in the clustermap is symmetric.
Returns
-------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance, with the xticks moved to the
diagonal if the CCI matrix was symmetric. If not, the same
input clustermap is returned, but with rotated xtick labels.
'''
if symmetric:
# Apply offset transform to all xticklabels.
clustermap.ax_row_dendrogram.set_visible(False)
clustermap.ax_heatmap.tick_params(bottom=False) # Hide xtick line
x_labels = clustermap.ax_heatmap.xaxis.get_majorticklabels()
dpi_x = clustermap.fig.dpi_scale_trans.to_values()[0]
dpi_y = clustermap.fig.dpi_scale_trans.to_values()[3]
x0 = clustermap.ax_heatmap.transData.transform(x_labels[0].get_position())
x1 = clustermap.ax_heatmap.transData.transform(x_labels[1].get_position())
ylims = clustermap.ax_heatmap.get_ylim()
bottom_points = clustermap.ax_heatmap.transData.transform((1.0, ylims[0]))[1]
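        # For every x tick label, compute the offset (in inches) from the
        # bottom of the heatmap up to the diagonal cell of its column and
        # shift the label by that amount, so the labels run along the
        # diagonal of the upper-triangular plot.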
for i, xl in enumerate(x_labels):
# Move labels in dx and dy points.
swap_xy = (1.0, xl.get_position()[0] + 0.5)
new_y_points = clustermap.ax_heatmap.transData.transform(swap_xy)[1]
dx = -0.5 * abs(x1[0] - x0[0]) / dpi_x
dy = (new_y_points - bottom_points) / dpi_y
offset = mpl.transforms.ScaledTranslation(dx, dy, clustermap.fig.dpi_scale_trans)
xl.set_transform(xl.get_transform() + offset)
if symmetric:
rot = 45
else:
rot = 90
va = 'center'
clustermap.ax_heatmap.set_xticklabels(clustermap.ax_heatmap.xaxis.get_majorticklabels(),
rotation=rot,
rotation_mode='anchor',
va='bottom',
ha='right') # , fontsize=9.5)
clustermap.ax_heatmap.set_yticklabels(clustermap.ax_heatmap.yaxis.get_majorticklabels(),
rotation=0,
va=va,
ha='left') # , fontsize=9.5)
return clustermap | 38.307143 | 122 | 0.605134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,388 | 0.459196 |
40465e4dbbb9334d5135c8ffe536947ae617c71d | 1,051 | py | Python | var/spack/repos/builtin.mock/packages/gnuconfig/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| null | null | null | var/spack/repos/builtin.mock/packages/gnuconfig/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin.mock/packages/gnuconfig/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
]
| 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class Gnuconfig(Package):
"""
The GNU config.guess and config.sub scripts versioned by timestamp.
This package can be used as a build dependency for autotools packages that
ship a tarball with outdated config.guess and config.sub files.
"""
has_code = False
version('2021-08-14')
def install(self, spec, prefix):
config_sub = join_path(prefix, 'config.sub')
config_guess = join_path(prefix, 'config.guess')
# Create files
with open(config_sub, 'w') as f:
f.write("#!/bin/sh\necho gnuconfig version of config.sub")
with open(config_guess, 'w') as f:
f.write("#!/bin/sh\necho gnuconfig version of config.guess")
# Make executable
os.chmod(config_sub, 0o775)
os.chmod(config_guess, 0o775)
| 29.194444 | 78 | 0.669838 | 811 | 0.771646 | 0 | 0 | 0 | 0 | 0 | 0 | 598 | 0.568982 |
40472eab6c9976684dd368889d9c68536758019e | 378 | py | Python | mp4box/parsing/ctts.py | abhijeetbhagat/mp4box | 841ff0ef70c7f5a96548f47414bba69c00aa2f5e | [
"BSD-3-Clause"
]
| 7 | 2019-08-14T03:03:51.000Z | 2021-11-14T19:10:00.000Z | mp4box/parsing/ctts.py | wanyhamo/mp4box | c5c73cd37c01bd9d637f1f3ed82221065dc86d6f | [
"BSD-3-Clause"
]
| 10 | 2019-08-03T16:27:08.000Z | 2019-09-10T10:05:23.000Z | mp4box/parsing/ctts.py | abhijeetbhagat/mp4box | 841ff0ef70c7f5a96548f47414bba69c00aa2f5e | [
"BSD-3-Clause"
]
| 7 | 2019-08-19T17:58:03.000Z | 2021-03-03T07:25:54.000Z | from mp4box.box import CompositionTimeToSampleBox
def parse_ctts(reader, my_size):
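    # Parses the Composition Time to Sample ('ctts') box: a 32-bit
    # version/flags word, a 32-bit entry count, then entry_count pairs of
    # (sample_count, sample_offset) describing composition-time offsets.
    # Assumes `reader.read32()` returns the next big-endian 32-bit field.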
version = reader.read32()
box = CompositionTimeToSampleBox(my_size, version, 0)
box.entry_count = reader.read32()
for _ in range(0, box.entry_count):
box.sample_count.append(reader.read32())
box.sample_offset.append(reader.read32())
return box
| 29.076923 | 58 | 0.693122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4047883a8f6ee83210f65d3f654ff142172cb4a8 | 24,485 | py | Python | MsLightweaverManager.py | Goobley/MsLightweaver | 6383867ba2a7ab00df947c8470b438d9eadcc321 | [
"MIT"
]
| null | null | null | MsLightweaverManager.py | Goobley/MsLightweaver | 6383867ba2a7ab00df947c8470b438d9eadcc321 | [
"MIT"
]
| 1 | 2020-05-05T13:49:54.000Z | 2021-04-29T12:41:40.000Z | MsLightweaverManager.py | Goobley/MsLightweaver | 6383867ba2a7ab00df947c8470b438d9eadcc321 | [
"MIT"
]
| null | null | null | import pickle
import numpy as np
import matplotlib.pyplot as plt
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, Fe_atom, FeI_atom, MgII_atom, N_atom, Na_atom, S_atom, CaII_atom
from lightweaver.atmosphere import Atmosphere, ScaleType
from lightweaver.atomic_table import DefaultAtomicAbundance
from lightweaver.atomic_set import RadiativeSet, SpeciesStateTable
from lightweaver.molecule import MolecularTable
from lightweaver.LwCompiled import LwContext
from lightweaver.utils import InitialSolution, planck, NgOptions, ConvergenceError, compute_radiative_losses, integrate_line_losses
import lightweaver.constants as Const
import lightweaver as lw
from typing import List
from copy import deepcopy
from MsLightweaverAtoms import H_6, CaII, H_6_nasa, CaII_nasa
import os
import os.path as path
import time
from radynpy.matsplotlib import OpcFile
from radynpy.utils import hydrogen_absorption
from numba import njit
from pathlib import Path
from scipy.linalg import solve
from scipy.interpolate import interp1d, PchipInterpolator
# from HydroWeno.Simulation import Grid
# from HydroWeno.Advector import Advector
# from HydroWeno.BCs import zero_grad_bc
# from HydroWeno.Weno import reconstruct_weno_nm_z
import warnings
from traceback import print_stack
from weno4 import weno4
from RadynAdvection import an_sol, an_rad_sol, an_gml_sol
import pdb
def weno4_pos(xs, xp, fp, **kwargs):
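    # Positivity-preserving wrapper: interpolate log(fp) with WENO4 and
    # exponentiate the result, so the interpolant can never go negative.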
    return np.exp(weno4(xs, xp, np.log(fp), **kwargs))
# https://stackoverflow.com/a/21901260
import subprocess
def mslightweaver_revision():
p = Path(__file__).parent
isGitRepo = subprocess.check_output(['git', 'rev-parse', '--is-inside-work-tree'], cwd=p).decode('ascii').strip() == 'true'
if not isGitRepo:
raise ValueError('Cannot find git info.')
gitChanges = subprocess.check_output(['git', 'status', '--porcelain', '--untracked-files=no'], cwd=p).decode('ascii').strip()
if len(gitChanges) > 0:
        raise ValueError('Uncommitted changes to tracked files, cannot proceed:\n%s' % gitChanges)
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=p).decode('ascii').strip()
def check_write_git_revision(outputDir):
revision = mslightweaver_revision()
with open(outputDir + 'GitRevision.txt', 'w') as f:
f.write(revision)
def nr_advect(atmost, i0, eqPops, activeAtomNames, abundances):
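    # Advect the level populations of each active species from timestep i0 to
    # i0+1 using the analytic advection solution (an_sol), then rescale every
    # species so its summed population matches the new mass density d1.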
d1 = atmost.d1[i0+1]
for a in activeAtomNames:
pop = np.zeros_like(eqPops[a])
for i in range(pop.shape[0]):
pop[i, :] = an_sol(atmost, i0, eqPops[a][i], tol=1e-8, maxIter=1000)
nTotal = d1 / (abundances.massPerH * lw.Amu) * abundances[a]
popCorrectionFactor = nTotal / pop.sum(axis=0)
print('Max Correction %s: %.2e' % (a, np.abs(1-popCorrectionFactor).max()))
pop *= popCorrectionFactor
eqPops[a][...] = pop
class CoronalIrraditation(lw.BoundaryCondition):
def __init__(self):
# NOTE(cmo): This data needs to be in (mu, toObs) order, i.e. mu[0]
# down, mu[0] up, mu[1] down...
# self.I = I1d.reshape(I1d.shape[0], -1, I1d.shape[-1])
self.I = None
def set_bc(self, I1d):
self.I = np.expand_dims(I1d, axis=2)
def compute_bc(self, atmos, spect):
# if spect.wavelength.shape[0] != self.I.shape[0]:
# result = np.ones((spect.wavelength.shape[0], spect.I.shape[1], atmos.Nz))
# else:
if self.I is None:
            raise ValueError('I has not been set (CoronalIrraditation)')
result = np.copy(self.I)
return result
@njit
def time_dep_update_impl(theta, dt, Gamma, GammaPrev, n, nPrev):
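    # Theta-scheme time integration of the population rate equations, solved
    # point by point in depth: theta=1 is fully implicit (backward Euler),
    # theta=0.5 is Crank-Nicolson using the previous timestep's Gamma.
    # Returns the largest relative change in any level population.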
Nlevel = n.shape[0]
Nspace = n.shape[1]
GammaPrev = GammaPrev if GammaPrev is not None else np.empty_like(Gamma)
Gam = np.zeros((Nlevel, Nlevel))
nk = np.zeros(Nlevel)
nPrevIter = np.zeros(Nlevel)
nCurrent = np.zeros(Nlevel)
atomDelta = 0.0
for k in range(Nspace):
nCurrent[:] = n[:, k]
nPrevIter[:] = nPrev[:, k]
Gam[...] = -theta * Gamma[:,:, k] * dt
Gam += np.eye(Nlevel)
if theta != 1.0:
nk[:] = (1.0 - theta) * dt * GammaPrev[:,:, k] @ nPrevIter + nPrevIter
else:
nk[:] = nPrevIter
nNew = np.linalg.solve(Gam, nk)
n[:, k] = nNew
atomDelta = max(atomDelta, np.nanmax(np.abs(1.0 - nCurrent / nNew)))
return atomDelta
class MsLightweaverManager:
def __init__(self, atmost, outputDir,
atoms, activeAtoms=['H', 'Ca'],
detailedH=False,
detailedHPath=None,
startingCtx=None, conserveCharge=False,
populationTransportMode='Advect',
downgoingRadiation=None,
prd=False):
# check_write_git_revision(outputDir)
self.atmost = atmost
self.outputDir = outputDir
self.conserveCharge = conserveCharge
self.abund = DefaultAtomicAbundance
self.idx = 0
self.nHTot = atmost.d1 / (self.abund.massPerH * Const.Amu)
self.prd = prd
self.updateRhoPrd = False
self.detailedH = detailedH
# NOTE(cmo): If this is None and detailedH is True then the data from
# atmost will be used, otherwise, an MsLw pickle will be loaded from
# the path.
self.detailedHPath = detailedHPath
if populationTransportMode == 'Advect':
self.advectPops = True
self.rescalePops = False
elif populationTransportMode == 'Rescale':
self.advectPops = False
self.rescalePops = True
elif populationTransportMode is None or populationTransportMode == 'None':
self.advectPops = False
self.rescalePops = False
else:
raise ValueError('Unknown populationTransportMode: %s' % populationTransportMode)
self.downgoingRadiation = downgoingRadiation
if startingCtx is not None:
self.ctx = startingCtx
args = startingCtx.arguments
self.atmos = args['atmos']
self.spect = args['spect']
self.aSet = self.spect.radSet
self.eqPops = args['eqPops']
self.upperBc = atmos.upperBc
else:
nHTot = np.copy(self.nHTot[0])
if self.downgoingRadiation:
self.upperBc = CoronalIrraditation()
else:
self.upperBc = None
self.atmos = Atmosphere.make_1d(scale=ScaleType.Geometric, depthScale=np.copy(atmost.z1[0]), temperature=np.copy(atmost.tg1[0]), vlos=np.copy(atmost.vz1[0]), vturb=np.copy(atmost.vturb), ne=np.copy(atmost.ne1[0]), nHTot=nHTot, upperBc=self.upperBc)
# self.atmos.convert_scales()
self.atmos.quadrature(5)
self.aSet = RadiativeSet(atoms)
self.aSet.set_active(*activeAtoms)
if detailedH:
self.aSet.set_detailed_static('H')
# NOTE(cmo): Radyn seems to compute the collisional rates once per
# timestep(?) and we seem to get a much better agreement for Ca
# with the CH rates when H is set to LTE for the initial timestep.
# Might be a bug in my implementation though.
self.spect = self.aSet.compute_wavelength_grid()
self.mols = MolecularTable()
if self.conserveCharge:
self.eqPops = self.aSet.iterate_lte_ne_eq_pops(self.atmos, self.mols)
else:
self.eqPops = self.aSet.compute_eq_pops(self.atmos, self.mols)
self.ctx = lw.Context(self.atmos, self.spect, self.eqPops, initSol=InitialSolution.Lte, conserveCharge=self.conserveCharge, Nthreads=12)
self.atmos.bHeat = np.ones_like(self.atmost.bheat1[0]) * 1e-20
self.atmos.hPops = self.eqPops['H']
np.save(self.outputDir + 'Wavelength.npy', self.ctx.spect.wavelength)
if self.detailedH:
self.eqPops['H'][:] = self.detailed_hydrogen_pops()
if self.downgoingRadiation:
self.upperBc.set_bc(self.downgoingRadiation.compute_downgoing_radiation(self.spect.wavelength, self.atmos))
self.ctx.depthData.fill = True
# self.opac_background()
# NOTE(cmo): Set up background
# self.opc = OpcFile('opctab_cmo_mslw.dat')
# # self.opc = OpcFile()
# opcWvl = self.opc.wavel
# self.opcWvl = opcWvl
# # NOTE(cmo): Find mapping from wavelength array to opctab array, with
# # constant background over the region of each line. Are overlaps a
# # problem here? Probably -- but let's see the spectrum in practice
# # The record to be used is the one in self.wvlIdxs + 4 due to the data
# # layout in the opctab
# self.wvlIdxs = np.ones_like(self.spect.wavelength, dtype=np.int64) * -1
# lineCores = []
# for a in self.aSet.activeSet:
# for l in a.lines:
# lineCores.append(l.lambda0 * 10)
# lineCores = np.array(lineCores)
# lineCoreIdxs = np.zeros_like(lineCores)
# for i, l in enumerate(lineCores):
# closestIdx = np.argmin(np.abs(opcWvl - l))
# lineCoreIdxs[i] = closestIdx
# for a in self.aSet.activeSet:
# for l in a.lines:
# # closestIdx = np.argmin((opcWvl - l.lambda0*10)**2)
# closestCore = np.argmin(np.abs((l.wavelength * 10)[:, None] - lineCores), axis=1)
# closestIdx = lineCoreIdxs[closestCore]
# sub = find_subarray(self.spect.wavelength, l.wavelength)
# self.wvlIdxs[sub:sub + l.wavelength.shape[0]] = closestIdx
# for i, v in enumerate(self.wvlIdxs):
# if v >= 0:
# continue
# closestIdx = np.argmin(np.abs(opcWvl - self.spect.wavelength[i]*10))
# self.wvlIdxs[i] = closestIdx
# self.opctabIdxs = self.wvlIdxs + 4
# NOTE(cmo): Compute initial background opacity
# np.save('chi.npy', self.ctx.background.chi)
# np.save('eta.npy', self.ctx.background.eta)
# np.save('sca.npy', self.ctx.background.sca)
# self.opac_background()
def initial_stat_eq(self, Nscatter=3, NmaxIter=1000, popTol=1e-3, JTol=3e-3):
if self.prd:
self.ctx.update_hprd_coeffs()
for i in range(NmaxIter):
dJ = self.ctx.formal_sol_gamma_matrices()
if i < Nscatter:
continue
delta = self.ctx.stat_equil()
if self.prd:
self.ctx.prd_redistribute()
if self.ctx.crswDone and dJ < JTol and delta < popTol:
print('Stat eq converged in %d iterations' % (i+1))
break
else:
raise ConvergenceError('Stat Eq did not converge.')
def advect_pops(self):
if self.rescalePops:
adv = self.atmost.d1[self.idx+1] / self.atmost.d1[self.idx]
neAdv = self.atmos.ne * adv
self.atmos.ne[:] = neAdv
for atom in self.aSet.activeAtoms:
p = self.eqPops[atom.element]
for i in range(p.shape[0]):
pAdv = p[i] * adv
p[i, :] = pAdv
elif self.advectPops:
nr_advect(self.atmost, self.idx, self.eqPops, [a.element for a in self.aSet.activeAtoms], self.abund)
# NOTE(cmo): Guess advected n_e. Will be corrected to be self
# consistent later (in update_deps if conserveCharge=True). If
# conserveCharge isn't true then we're using loaded n_e anyway
# neAdv = interp1d(z0Tracer, np.log10(self.atmos.ne), kind=3, fill_value='extrapolate')(z1)
# self.atmos.ne[:] = 10**neAdv
def detailed_hydrogen_pops(self):
if not self.detailedH:
raise ValueError('Detailed H pops called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
pops = step['eqPops']['H']['n']
else:
pops = self.atmost.nh1[self.idx, :] / (np.sum(self.atmost.nh1[self.idx, :], axis=0) / self.atmos.nHTot)[None, :]
return pops
def detailed_ne(self):
if not self.detailedH:
raise ValueError('Detailed ne called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
ne = step['ne']
else:
ne = self.atmost.ne1[self.idx]
return ne
def save_timestep(self):
i = self.idx
with open(self.outputDir + 'Step_%.6d.pickle' % i, 'wb') as pkl:
eqPops = distill_pops(self.eqPops)
Iwave = self.ctx.spect.I
lines = []
for a in self.aSet.activeAtoms:
lines += self.aSet[a.element].lines
losses = compute_radiative_losses(self.ctx)
lineLosses = integrate_line_losses(self.ctx, losses, lines, extendGridNm=5.0)
pickle.dump({'eqPops': eqPops, 'Iwave': Iwave,
'ne': self.atmos.ne, 'lines': lines,
'losses': lineLosses}, pkl)
def load_timestep(self, stepNum):
with open(self.outputDir + 'Step_%.6d.pickle' % stepNum, 'rb') as pkl:
step = pickle.load(pkl)
self.idx = stepNum
self.atmos.temperature[:] = self.atmost.tg1[self.idx]
self.atmos.vlos[:] = self.atmost.vz1[self.idx]
if not self.conserveCharge:
self.atmos.ne[:] = self.detailed_ne()
if self.advectPops or self.rescalePops:
self.atmos.nHTot[:] = self.nHTot[self.idx]
self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
self.atmos.height[:] = self.atmost.z1[self.idx]
for name, pops in step['eqPops'].items():
if pops['n'] is not None:
self.eqPops.atomicPops[name].pops[:] = pops['n']
self.eqPops.atomicPops[name].nStar[:] = pops['nStar']
self.atmos.ne[:] = step['ne']
self.ctx.spect.I[:] = step['Iwave']
self.ctx.update_deps()
def increment_step(self):
self.advect_pops()
self.idx += 1
self.atmos.temperature[:] = self.atmost.tg1[self.idx]
self.atmos.vlos[:] = self.atmost.vz1[self.idx]
if not self.conserveCharge:
self.atmos.ne[:] = self.detailed_ne()
if self.advectPops or self.rescalePops:
self.atmos.nHTot[:] = self.nHTot[self.idx]
self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
if self.detailedH:
self.eqPops['H'][:] = self.detailed_hydrogen_pops()
self.atmos.height[:] = self.atmost.z1[self.idx]
self.ctx.update_deps()
if self.prd:
self.ctx.update_hprd_coeffs()
self.updateRhoPrd = False
self.interp_rho_prd()
if self.downgoingRadiation:
self.upperBc.set_bc(self.downgoingRadiation.compute_downgoing_radiation(self.spect.wavelength, self.atmos))
# self.opac_background()
def interp_rho_prd(self):
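        # Remap each PRD transition's rhoPrd profile from the previous
        # timestep's height grid onto the new grid with WENO4 interpolation;
        # transitions without a rhoPrd attribute are skipped, and negative
        # interpolants are clamped to a small positive value.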
prevIdx = self.idx - 1
prevZ = self.atmost.z1[prevIdx]
z = self.atmost.z1[self.idx]
for atom in self.ctx.activeAtoms:
for trans in atom.trans:
try:
trans.rhoPrd
for la in range(trans.rhoPrd.shape[0]):
trans.rhoPrd[la, :] = weno4(z, prevZ, trans.rhoPrd[la])
trans.rhoPrd[trans.rhoPrd < 0] = 1e-5
except AttributeError:
pass
def time_dep_prev_state(self, evalGamma=False):
if evalGamma:
self.ctx.formal_sol_gamma_matrices()
s = {}
s['pops'] = [np.copy(a.n) for a in self.ctx.activeAtoms]
s['Gamma'] = [np.copy(a.Gamma) if evalGamma else None for a in self.ctx.activeAtoms]
return s
def time_dep_update(self, dt, prevState, theta=0.5):
atoms = self.ctx.activeAtoms
Nspace = self.atmos.Nspace
maxDelta = 0.0
for i, atom in enumerate(atoms):
atomDelta = time_dep_update_impl(theta, dt, atom.Gamma, prevState['Gamma'][i],
atom.n, prevState['pops'][i])
maxDelta = max(maxDelta, atomDelta)
s = ' %s delta = %6.4e' % (atom.atomicModel.element, atomDelta)
print(s)
return maxDelta
def time_dep_step(self, nSubSteps=200, popsTol=1e-3, JTol=3e-3, theta=1.0, dt=None):
dt = dt if dt is not None else self.atmost.dt[self.idx+1]
dNrPops = 0.0
underTol = False
# self.ctx.spect.J[:] = 0.0
if self.prd:
for atom in self.ctx.activeAtoms:
for t in atom.trans:
t.recompute_gII()
prevState = self.time_dep_prev_state(evalGamma=(theta!=1.0))
for sub in range(nSubSteps):
if self.updateRhoPrd and sub > 0:
dRho, prdIter = self.ctx.prd_redistribute(maxIter=10, tol=popsTol)
dJ = self.ctx.formal_sol_gamma_matrices()
delta = self.time_dep_update(dt, prevState, theta=theta)
if self.conserveCharge:
dNrPops = self.ctx.nr_post_update(timeDependentData={'dt': dt, 'nPrev': prevState['pops']})
if sub > 1 and ((delta < popsTol and dJ < JTol and dNrPops < popsTol)
or (delta < 0.1*popsTol and dNrPops < 0.1*popsTol)):
if self.prd:
if self.updateRhoPrd and dRho < JTol:
break
else:
print('Starting PRD Iterations')
self.updateRhoPrd = True
else:
break
else:
raise ValueError('NON-CONVERGED')
def cont_fn_data(self, step):
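        # Reload the requested timestep, iterate the formal solution until J
        # is converged, and collect the depth-dependent opacities and
        # emissivities (plus the background terms) needed to build
        # contribution functions for that step.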
self.load_timestep(step)
self.ctx.depthData.fill = True
dJ = 1.0
while dJ > 1e-5:
dJ = self.ctx.formal_sol_gamma_matrices()
self.ctx.depthData.fill = False
J = np.copy(self.ctx.spect.J)
sourceData = {'chi': np.copy(self.ctx.depthData.chi),
'eta': np.copy(self.ctx.depthData.eta),
'chiBg': np.copy(self.ctx.background.chi),
'etaBg': np.copy(self.ctx.background.eta),
'scaBg': np.copy(self.ctx.background.sca),
'J': J
}
return sourceData
def rf_k(self, step, dt, pertSize, k, Jstart=None):
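        # Finite-difference response function of the emergent intensity to a
        # temperature perturbation at depth index k: run the time-dependent
        # step with T[k] +/- pertSize/2 and return the two emergent spectra
        # (plus, minus), from which a centred-difference response can be formed.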
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.temperature[k] += 0.5 * pertSize
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.temperature[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_k_stat_eq(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.temperature[k] += 0.5 * pertSize
self.ctx.update_deps()
# self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
while True:
self.ctx.formal_sol_gamma_matrices()
dPops = self.ctx.stat_equil()
if dPops < 1e-5 and dPops != 0.0:
break
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.temperature[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
# self.time_dep_step(popsTol=1e-4, JTol=5e-3, dt=dt, theta=1.0)
while True:
self.ctx.formal_sol_gamma_matrices()
dPops = self.ctx.stat_equil()
if dPops < 1e-5 and dPops != 0.0:
break
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_ne_k(self, step, dt, pertSizePercent, k, Jstart=None):
self.load_timestep(step)
print(pertSizePercent)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.ne[k] += 0.5 * pertSizePercent * self.atmos.ne[k]
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.ne[k] -= 0.5 * pertSizePercent * self.atmos.ne[k]
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def rf_vlos_k(self, step, dt, pertSize, k, Jstart=None):
self.load_timestep(step)
print(pertSize)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
if Jstart is None:
dJ = 1.0
while dJ > 1e-3:
dJ = self.ctx.formal_sol_gamma_matrices()
Jstart = np.copy(self.ctx.spect.J)
self.atmos.vlos[k] += 0.5 * pertSize
self.ctx.update_deps()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
plus = np.copy(self.ctx.spect.I[:, -1])
self.load_timestep(step)
self.ctx.clear_ng()
if Jstart is not None:
self.ctx.spect.J[:] = Jstart
else:
self.ctx.spect.J[:] = 0.0
self.atmos.vlos[k] -= 0.5 * pertSize
self.ctx.update_deps()
# if Jstart is None:
# dJ = 1.0
# while dJ > 1e-3:
# dJ = self.ctx.formal_sol_gamma_matrices()
self.time_dep_step(popsTol=1e-3, JTol=5e-3, dt=dt, theta=1.0)
minus = np.copy(self.ctx.spect.I[:, -1])
return plus, minus
def convert_atomic_pops(atom):
d = {}
if atom.pops is not None:
d['n'] = atom.pops
else:
d['n'] = atom.pops
d['nStar'] = atom.nStar
d['radiativeRates'] = atom.radiativeRates
return d
def distill_pops(eqPops):
d = {}
for atom in eqPops.atomicPops:
d[atom.element.name] = convert_atomic_pops(atom)
return d
| 37.611367 | 260 | 0.575128 | 20,407 | 0.833449 | 0 | 0 | 830 | 0.033898 | 0 | 0 | 4,380 | 0.178885 |
404999f8afb17c0ba6be91ab0f875db288f28bae | 1,895 | py | Python | common/writeExcel.py | lixiaofeng1993/DjangoBlog | 94d062324367b8a30edf8d29e2e661c822bcb7c1 | [
"MIT"
]
| null | null | null | common/writeExcel.py | lixiaofeng1993/DjangoBlog | 94d062324367b8a30edf8d29e2e661c822bcb7c1 | [
"MIT"
]
| 6 | 2020-06-06T00:44:08.000Z | 2022-01-13T01:52:46.000Z | common/writeExcel.py | lixiaofeng1993/DjangoBlog | 94d062324367b8a30edf8d29e2e661c822bcb7c1 | [
"MIT"
]
| null | null | null | # coding:utf-8
from openpyxl import load_workbook
import openpyxl
from openpyxl.styles import Font, colors
def copy_excel(cese_path, report_path):
"""
复制测试用例到report_path
:param cese_path:
:param report_path:
:return:
"""
wb2 = openpyxl.Workbook()
wb2.save(report_path) # 在设置的路径下创建一个excel文件
# 读取数据
wb1 = openpyxl.load_workbook(cese_path)
wb2 = openpyxl.load_workbook(report_path)
sheets1 = wb1.sheetnames
sheets2 = wb2.sheetnames
sheet1 = wb1[sheets1[0]] # 获取第一个sheet页
sheet2 = wb2[sheets2[0]]
max_row = sheet1.max_row # 最大行数
max_column = sheet1.max_column # 最大列数
for m in list(range(1, max_row + 1)):
for n in list(range(97, 97 + max_column)): # chr(97)='a'
n = chr(n) # ASCII字符,excel文件的列 a b c
i = '%s%d' % (n, m) # 单元格编号
cell1 = sheet1[i].value # 获取测试用例单元格数据
sheet2[i].value = cell1 # 赋值到测试结果单元格
wb2.save(report_path) # 保存数据
wb1.close() # 关闭excel
wb2.close()
class Write_excel(object):
"""
修改excel数据
"""
def __init__(self, filename):
self.filename = filename
self.wb = load_workbook(self.filename)
        self.ws = self.wb.active # get the active sheet
def write(self, row_n, col_n, value):
"""写入数据,如(2,3,"hello"),第二行第三列写入数据"hello\""""
ft = Font(color=colors.RED, size=12, bold=True)
# 判断值为错误时添加字体样式
if value in ['fail', 'error'] or col_n == 12:
self.ws.cell(row_n, col_n).font = ft
if value == 'pass':
ft = Font(color=colors.GREEN)
self.ws.cell(row_n, col_n).font = ft
self.ws.cell(row_n, col_n).value = value
self.wb.save(self.filename)
if __name__ == "__main__":
# copy_excel("demo_api_3.xlsx", "test111.xlsx")
wt = Write_excel("test111.xlsx")
wt.write(4, 5, "HELLEOP")
wt.write(4, 6, "HELLEOP")
| 28.712121 | 65 | 0.601055 | 770 | 0.363379 | 0 | 0 | 0 | 0 | 0 | 0 | 674 | 0.318075 |
404a28cfcd9e972210d8ead53be99918c37812fc | 1,904 | py | Python | test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | zhaoming0/webml-polyfill | 56cf96eff96665da0f5fd7ef86fd5748f4bd22b9 | [
"Apache-2.0"
]
| 255 | 2020-05-22T07:45:29.000Z | 2022-03-29T23:58:22.000Z | test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | zhaoming0/webml-polyfill | 56cf96eff96665da0f5fd7ef86fd5748f4bd22b9 | [
"Apache-2.0"
]
| 5,102 | 2020-05-22T07:48:33.000Z | 2022-03-31T23:43:39.000Z | test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | ibelem/webml-polyfill | aaf1ba4f5357eaf6e89bf9990f5bdfb543cd2bc2 | [
"Apache-2.0"
]
| 120 | 2020-05-22T07:51:08.000Z | 2022-02-16T19:08:05.000Z | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
b1 = Input("op3", "TENSOR_FLOAT32", "{4}")
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
cm = Int32Scalar("channelMultiplier", 2)
output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("DEPTHWISE_CONV_2D",
i1, f1, b1,
pad0, pad0, pad0, pad0,
stride, stride,
cm, act).To(output)
model = model.RelaxedExecution(True)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[10, 21, 10, 22, 10, 23,
10, 24, 10, 25, 10, 26,
10, 27, 10, 28, 10, 29],
f1:
[.25, 0, .2, 0,
.25, 0, 0, .3,
.25, 0, 0, 0,
.25, .1, 0, 0],
b1:
[1, 2, 3, 4]}
# (i1 (conv) f1) + b1
# filter usage:
# in_ch1 * f_1 --> output_d1
# in_ch1 * f_2 --> output_d2
# in_ch2 * f_3 --> output_d3
# in_ch3 * f_4 --> output_d4
output0 = {output: # output 0
[11, 3, 7.2, 10.6,
11, 3, 7.4, 10.9,
11, 3, 7.8, 11.5,
11, 3, 8.0, 11.8]}
# Instantiate an example
Example((input0, output0))
| 31.733333 | 74 | 0.56355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,012 | 0.531513 |
404a73f48e1b3ca8bb85958c0c604a1931f4d34f | 1,450 | py | Python | jina/executors/evaluators/rank/recall.py | sdsd0101/jina | 1a835d9015c627a2cbcdc58ee3d127962ada1bc9 | [
"Apache-2.0"
]
| 2 | 2020-10-19T17:06:19.000Z | 2020-10-22T14:10:55.000Z | jina/executors/evaluators/rank/recall.py | ayansiddiqui007/jina | 2a764410de47cc11e53c8f652ea1095d5dab5435 | [
"Apache-2.0"
]
| null | null | null | jina/executors/evaluators/rank/recall.py | ayansiddiqui007/jina | 2a764410de47cc11e53c8f652ea1095d5dab5435 | [
"Apache-2.0"
]
| null | null | null | from typing import Sequence, Any
from jina.executors.evaluators.rank import BaseRankingEvaluator
from jina.executors.evaluators.decorators import as_aggregator
class RecallEvaluator(BaseRankingEvaluator):
"""A :class:`RecallEvaluator` evaluates the Precision of the search.
It computes how many of the first given `eval_at` groundtruth are found in the matches
"""
def __init__(self, eval_at: int, *args, **kwargs):
""""
:param eval_at: k at which evaluation is performed
"""
super().__init__(*args, **kwargs)
self.eval_at = eval_at
@property
def complete_name(self):
return f'Recall@{self.eval_at}'
@as_aggregator
def evaluate(self, matches_ids: Sequence[Any], groundtruth_ids: Sequence[Any], *args, **kwargs) -> float:
""""
:param matches_ids: the matched document identifiers from the request as matched by jina indexers and rankers
:param groundtruth_ids: the expected documents matches ids sorted as they are expected
:return the evaluation metric value for the request document
"""
ret = 0.0
for doc_id in groundtruth_ids[:self.eval_at]:
if doc_id in matches_ids:
ret += 1.0
divisor = min(self.eval_at, len(matches_ids))
if divisor == 0.0:
"""TODO: Agree on a behavior"""
return 0.0
else:
return ret / divisor
| 35.365854 | 117 | 0.648966 | 1,286 | 0.886897 | 0 | 0 | 842 | 0.58069 | 0 | 0 | 598 | 0.412414 |
404be03a1fd1048c68239ebc361551f5a1526980 | 270 | py | Python | tests/schema_mapping/structures/example5.py | danny-vayu/typedpy | e97735a742acbd5f1133e23f08cf43836476686a | [
"MIT"
]
| null | null | null | tests/schema_mapping/structures/example5.py | danny-vayu/typedpy | e97735a742acbd5f1133e23f08cf43836476686a | [
"MIT"
]
| null | null | null | tests/schema_mapping/structures/example5.py | danny-vayu/typedpy | e97735a742acbd5f1133e23f08cf43836476686a | [
"MIT"
]
| null | null | null | from typedpy import Array, DoNotSerialize, Structure, mappers
class Foo(Structure):
i: int
s: str
_serialization_mapper = {"i": "j", "s": "name"}
class Example5(Foo):
a: Array
_serialization_mapper = [{"j": DoNotSerialize}, mappers.TO_LOWERCASE] | 20.769231 | 73 | 0.674074 | 203 | 0.751852 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.066667 |
404bea024a89b873fc6d227cd6a12a54af3b3b8c | 3,447 | py | Python | src/semantic_parsing_with_constrained_lm/eval.py | microsoft/semantic_parsing_with_constrained_lm | 7e3c099500c3102e46d7a47469fe6840580c2b11 | [
"MIT"
]
| 17 | 2021-09-22T13:08:37.000Z | 2022-03-27T10:39:53.000Z | src/semantic_parsing_with_constrained_lm/eval.py | microsoft/semantic_parsing_with_constrained_lm | 7e3c099500c3102e46d7a47469fe6840580c2b11 | [
"MIT"
]
| 1 | 2022-03-12T01:05:15.000Z | 2022-03-12T01:05:15.000Z | src/semantic_parsing_with_constrained_lm/eval.py | microsoft/semantic_parsing_with_constrained_lm | 7e3c099500c3102e46d7a47469fe6840580c2b11 | [
"MIT"
]
| 1 | 2021-12-16T22:26:54.000Z | 2021-12-16T22:26:54.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import dataclasses
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, Generic, List, Optional, Sequence, TypeVar
from semantic_parsing_with_constrained_lm.datum import FullDatum, FullDatumSub
from semantic_parsing_with_constrained_lm.model import ModelResult
Pred = TypeVar("Pred")
Target = TypeVar("Target")
# TODO: Replace this with a more flexible function suited to each domain
def exact_match_with_logging(
test_datum: FullDatum, kbest: Sequence[ModelResult],
) -> bool:
gold = (
test_datum.canonical.strip(" ")
if test_datum.canonical is not None
else "UNREACHABLE"
)
pred = kbest[0].text.strip(" ") if kbest else ""
print()
print(f"context: {test_datum.agent_context}")
print(f"natural: {test_datum.natural}")
print(f"predicted: {pred}")
print(f"gold: {gold}")
result = gold == pred
print(f"is correct: {result}")
beam_result = False
for i, pred_i in enumerate(kbest):
stripped = pred_i.text.strip(" ")
beam_result = beam_result or gold == stripped
print(f"Beam {i} [{pred_i.cost:.3f}]: {stripped}")
print(f"is correct@{i}: {beam_result}")
print()
return result
class Metric(Generic[Pred, Target], ABC):
"""Used to measure goodness of model results compared to the ground truth.
Stateful over the duration of an experiment run."""
@abstractmethod
def update(self, pred: Pred, target: Target) -> Dict[str, Optional[str]]:
"""Uses `target` and the model predictions `pred` to update the state."""
pass
@abstractmethod
def compute(self) -> Dict[str, float]:
"""Uses the state to compute the final results."""
pass
@abstractmethod
def reset(self) -> None:
"""Reinitializes the state."""
pass
@dataclass
class TopKExactMatch(Metric[Sequence[str], FullDatumSub]):
k: int
correct: List[int] = dataclasses.field(init=False)
total: int = dataclasses.field(init=False)
def __post_init__(self):
self.reset()
def _is_correct(self, pred: str, target: FullDatumSub) -> bool:
"""Can be overridden by child classes."""
return pred == target.canonical
def update(
self, preds: Sequence[str], target: FullDatumSub
) -> Dict[str, Optional[str]]:
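        # For each rank i (1-based in the keys): "rank{i}" records whether the
        # i-th prediction itself matches the target, while "top{i}" records
        # whether any prediction up to rank i has matched so far.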
self.total += 1
found_correct = False
result: Dict[str, Optional[str]] = {}
for i, pred in enumerate(preds[: self.k]):
correct = self._is_correct(pred, target)
found_correct |= correct
self.correct[i] += found_correct
result[f"rank{i + 1}"] = "correct" if correct else "incorrect"
result[f"top{i + 1}"] = "correct" if found_correct else "incorrect"
# Handle when we have fewer predictions than self.k
for i in range(len(preds), self.k):
self.correct[i] += found_correct
result[f"rank{i + 1}"] = "incorrect"
result[f"top{i + 1}"] = "correct" if found_correct else "incorrect"
return result
def compute(self) -> Dict[str, float]:
result = {}
for i in range(self.k):
result[f"top{i + 1}"] = self.correct[i] / self.total
return result
def reset(self) -> None:
self.correct = [0] * self.k
self.total = 0
| 32.214953 | 81 | 0.630403 | 2,114 | 0.613287 | 0 | 0 | 1,930 | 0.559907 | 0 | 0 | 907 | 0.263127 |
404beb06647e2d6fc143a0b58a7a3cacb5877553 | 959 | py | Python | irrigation_control/irrigation_control_py3/common_irrigation_chains_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
]
| 2 | 2018-02-21T03:46:51.000Z | 2019-12-24T16:40:51.000Z | irrigation_control/irrigation_control_py3/common_irrigation_chains_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
]
| 7 | 2020-07-16T19:54:08.000Z | 2022-03-02T03:29:07.000Z | irrigation_control/irrigation_control_py3/common_irrigation_chains_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
]
| 2 | 2018-04-16T07:02:35.000Z | 2020-07-23T21:57:19.000Z |
class Check_Excessive_Current(object):
def __init__(self,chain_name,cf,handlers,irrigation_io,irrigation_hash_control,get_json_object):
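        # Build the excessive-current protection chain: when the assert
        # function below reports an over-current condition, the chain closes
        # the master valve, releases irrigation control, disables all
        # sprinklers, waits 15 event counts, and then resets. (Intent inferred
        # from the chain steps; the chain-flow framework `cf` is assumed to
        # execute these steps in order.)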
self.get_json_object = get_json_object
cf.define_chain(chain_name, False )
#cf.insert.log("check_excessive_current")
cf.insert.assert_function_reset(self.check_excessive_current)
cf.insert.log("excessive_current_found")
cf.insert.send_event("IRI_CLOSE_MASTER_VALVE",False)
cf.insert.send_event( "RELEASE_IRRIGATION_CONTROL")
cf.insert.one_step(irrigation_io.disable_all_sprinklers )
cf.insert.wait_event_count( count = 15 )
cf.insert.reset()
self.handlers = handlers
self.irrigation_hash_control = irrigation_hash_control
def check_excessive_current(self,cf_handle, chainObj, parameters, event):
#print("check excessive current")
return False #TBD
| 33.068966 | 100 | 0.691345 | 914 | 0.953076 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.171011 |
404c32173164735222505b93f1ef2b7219cec987 | 8,913 | py | Python | lib/surface/spanner/operations/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
]
| 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/spanner/operations/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
]
| null | null | null | lib/surface/spanner/operations/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
]
| 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner operations list."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.spanner import backup_operations
from googlecloudsdk.api_lib.spanner import database_operations
from googlecloudsdk.api_lib.spanner import instance_config_operations
from googlecloudsdk.api_lib.spanner import instance_operations
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exceptions
from googlecloudsdk.command_lib.spanner import flags
def _CommonRun(args):
"""Performs run actions common to all List stages."""
is_database_type = (
args.type == 'DATABASE_RESTORE' or args.type == 'DATABASE' or
args.type == 'DATABASE_CREATE' or args.type == 'DATABASE_UPDATE_DDL')
if args.backup or args.type == 'BACKUP':
# Update output table for backup operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
metadata.name.split('/').slice(-1:).join():label=BACKUP,
metadata.database.split('/').slice(-1).join():label=SOURCE_DATABASE,
metadata.progress.startTime:label=START_TIME,
metadata.progress.endTime:label=END_TIME
)
""")
if args.type == 'DATABASE_RESTORE':
# Update output table for restore operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
metadata.name.split('/').slice(-1:).join():label=RESTORED_DATABASE,
metadata.backupInfo.backup.split('/').slice(-1).join():label=SOURCE_BACKUP,
metadata.progress.startTime:label=START_TIME,
metadata.progress.endTime:label=END_TIME
)
""")
elif is_database_type:
# Update output table for database operations.
# pylint:disable=protected-access
args._GetParser().ai.display_info.AddFormat("""
table(
name.basename():label=OPERATION_ID,
metadata.statements.join(sep="\n"),
done():label=DONE,
metadata.'@type'.split('.').slice(-1:).join(),
database().split('/').slice(-1:).join():label=DATABASE_ID
)
""")
# Checks that user only specified either database or backup flag.
if (args.IsSpecified('database') and args.IsSpecified('backup')):
raise c_exceptions.InvalidArgumentException(
'--database or --backup',
'Must specify either --database or --backup. To search backups for a '
'specific database, use the --database flag with --type=BACKUP')
# Checks that the user did not specify the backup flag with the type filter
# set to a database operation type.
if (args.IsSpecified('backup') and is_database_type):
raise c_exceptions.InvalidArgumentException(
'--backup or --type',
'The backup flag cannot be used with the type flag set to a '
'database operation type.')
if args.type == 'INSTANCE':
if args.IsSpecified('database'):
raise c_exceptions.InvalidArgumentException(
'--database or --type',
'The `--database` flag cannot be used with `--type=INSTANCE`.')
if args.IsSpecified('backup'):
raise c_exceptions.InvalidArgumentException(
'--backup or --type',
'The `--backup` flag cannot be used with `--type=INSTANCE`.')
if args.type == 'BACKUP':
if args.database:
db_filter = backup_operations.BuildDatabaseFilter(args.instance,
args.database)
return backup_operations.List(args.instance, db_filter)
if args.backup:
return backup_operations.ListGeneric(args.instance, args.backup)
return backup_operations.List(args.instance)
if is_database_type:
type_filter = database_operations.BuildDatabaseOperationTypeFilter(
args.type)
return database_operations.ListDatabaseOperations(args.instance,
args.database,
type_filter)
if args.backup:
return backup_operations.ListGeneric(args.instance, args.backup)
if args.database:
return database_operations.List(args.instance, args.database)
return instance_operations.List(args.instance)
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class List(base.ListCommand):
"""List the Cloud Spanner operations on the given instance or database."""
detailed_help = {
'EXAMPLES':
textwrap.dedent("""\
To list Cloud Spanner instance operations for an instance, run:
$ {command} --instance=my-instance-id --type=INSTANCE
To list Cloud Spanner backup operations for an instance, run:
$ {command} --instance=my-instance-id --type=BACKUP
To list Cloud Spanner database operations for an instance, run:
$ {command} --instance=my-instance-id --type=DATABASE
To list Cloud Spanner database operations for a database, run:
$ {command} --instance=my-instance-id --database=my-database-id --type=DATABASE
To list Cloud Spanner backup operations for a database, run:
$ {command} --instance=my-instance-id --database=my-database-id --type=BACKUP
To list Cloud Spanner backup operations for a backup, run:
$ {command} --instance=my-instance-id --backup=my-backup-id --type=BACKUP
"""),
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go on
the command line after this command. Positional arguments are allowed.
"""
flags.Instance(
positional=False,
text='The ID of the instance the operations are executing on.'
).AddToParser(parser)
flags.AddCommonListArgs(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
return _CommonRun(args)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaList(List):
"""List the Cloud Spanner operations on the given instance or database or instance-config."""
@staticmethod
def Args(parser):
"""See base class."""
mutex_group = parser.add_group(mutex=True, required=True)
mutex_group.add_argument(
'--instance-config',
completer=flags.InstanceConfigCompleter,
help='The ID of the instance config the operation is executing on.')
mutex_group.add_argument(
'--instance',
completer=flags.InstanceCompleter,
help='The ID of the instance the operation is executing on.')
additional_choices = {
'INSTANCE_CONFIG_CREATE':
'Instance config create operations are returned for the given '
'instance config (--instance-config).',
'INSTANCE_CONFIG_UPDATE':
'Instance config update operations are returned for the given '
'instance config (--instance-config).'
}
flags.AddCommonListArgs(parser, additional_choices)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
if args.instance_config:
type_filter = instance_config_operations.BuildInstanceConfigOperationTypeFilter(
args.type)
return instance_config_operations.List(args.instance_config, type_filter)
return _CommonRun(args)
| 37.766949 | 95 | 0.674072 | 3,529 | 0.395939 | 0 | 0 | 3,640 | 0.408392 | 0 | 0 | 5,340 | 0.599125 |
404cc5bda9dc3a1cc2cecd17725e8022aeed3ad0 | 49,844 | py | Python | pgmpy/tests/test_models/test_SEM.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
]
| null | null | null | pgmpy/tests/test_models/test_SEM.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
]
| null | null | null | pgmpy/tests/test_models/test_SEM.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
]
| null | null | null | import os
import unittest
import numpy as np
import networkx as nx
import numpy.testing as npt
from pgmpy.models import SEM, SEMGraph, SEMAlg
class TestSEM(unittest.TestCase):
def test_from_graph(self):
self.demo = SEM.from_graph(
ebunch=[
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
("xi1", "eta1"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta1", "eta2"),
("xi1", "eta2"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5"),
("y2", "y6"),
("y2", "y4"),
("y3", "y7"),
("y4", "y8"),
("y6", "y8"),
],
)
self.assertSetEqual(self.demo.latents, {"xi1", "eta1", "eta2"})
self.assertSetEqual(
self.demo.observed, {"x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"}
)
self.assertListEqual(
sorted(self.demo.graph.nodes()),
[
"eta1",
"eta2",
"x1",
"x2",
"x3",
"xi1",
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
],
)
self.assertListEqual(
sorted(self.demo.graph.edges()),
sorted(
[
("eta1", "eta2"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
("xi1", "eta1"),
("xi1", "eta2"),
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
]
),
)
self.assertDictEqual(self.demo.graph.edges[("xi1", "x1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y4")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y5")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y6")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y7")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y8")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
]
),
)
for edge in self.demo.err_graph.edges():
self.assertDictEqual(self.demo.err_graph.edges[edge], {"weight": np.NaN})
for node in self.demo.err_graph.nodes():
self.assertDictEqual(self.demo.err_graph.nodes[node], {"weight": np.NaN})
def test_from_lavaan(self):
model_str = """# %load model.lav
# measurement model
ind60 =~ x1 + x2 + x3
dem60 =~ y1 + y2 + y3 + y4
dem65 =~ y5 + y6 + y7 + y8
# regressions
dem60 ~ ind60
dem65 ~ ind60 + dem60
# residual correlations
y1 ~~ y5
y2 ~~ y4 + y6
y3 ~~ y7
y4 ~~ y8
y6 ~~ y8
"""
model_from_str = SEM.from_lavaan(string=model_str)
with open("test_model.lav", "w") as f:
f.write(model_str)
model_from_file = SEM.from_lavaan(filename="test_model.lav")
os.remove("test_model.lav")
expected_edges = set(
[
("ind60", "x1"),
("ind60", "x2"),
("ind60", "x3"),
("ind60", "dem60"),
("ind60", "dem65"),
("dem60", "dem65"),
("dem60", "y1"),
("dem60", "y2"),
("dem60", "y3"),
("dem60", "y4"),
("dem65", "y5"),
("dem65", "y6"),
("dem65", "y7"),
("dem65", "y8"),
]
)
        # Undirected graph: edges may be returned in either direction, so include both orientations.
expected_err_edges = set(
[
("y1", "y5"),
("y5", "y1"),
("y2", "y6"),
("y6", "y2"),
("y2", "y4"),
("y4", "y2"),
("y3", "y7"),
("y7", "y3"),
("y4", "y8"),
("y8", "y4"),
("y6", "y8"),
("y8", "y6"),
]
)
expected_latents = set(["dem60", "dem65", "ind60"])
self.assertEqual(set(model_from_str.graph.edges()), expected_edges)
self.assertEqual(set(model_from_file.graph.edges()), expected_edges)
self.assertFalse(set(model_from_str.err_graph.edges()) - expected_err_edges)
self.assertFalse(set(model_from_file.err_graph.edges()) - expected_err_edges)
self.assertEqual(set(model_from_str.latents), expected_latents)
self.assertEqual(set(model_from_file.latents), expected_latents)
def test_from_lisrel(self):
pass # TODO: Add this test when done writing the tests for SEMAlg
def test_from_ram(self):
pass # TODO: Add this.
class TestSEMGraph(unittest.TestCase):
def setUp(self):
self.demo = SEMGraph(
ebunch=[
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
("xi1", "eta1"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta1", "eta2"),
("xi1", "eta2"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5"),
("y2", "y6"),
("y2", "y4"),
("y3", "y7"),
("y4", "y8"),
("y6", "y8"),
],
)
self.union = SEMGraph(
ebunch=[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
],
latents=[],
err_corr=[("yrsmill", "age")],
)
self.demo_params = SEMGraph(
ebunch=[
("xi1", "x1", 0.4),
("xi1", "x2", 0.5),
("xi1", "x3", 0.6),
("xi1", "eta1", 0.3),
("eta1", "y1", 1.1),
("eta1", "y2", 1.2),
("eta1", "y3", 1.3),
("eta1", "y4", 1.4),
("eta1", "eta2", 0.1),
("xi1", "eta2", 0.2),
("eta2", "y5", 0.7),
("eta2", "y6", 0.8),
("eta2", "y7", 0.9),
("eta2", "y8", 1.0),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 1.5),
("y2", "y6", 1.6),
("y2", "y4", 1.9),
("y3", "y7", 1.7),
("y4", "y8", 1.8),
("y6", "y8", 2.0),
],
err_var={
"y1": 2.1,
"y2": 2.2,
"y3": 2.3,
"y4": 2.4,
"y5": 2.5,
"y6": 2.6,
"y7": 2.7,
"y8": 2.8,
"x1": 3.1,
"x2": 3.2,
"x3": 3.3,
"eta1": 2.9,
"eta2": 3.0,
"xi1": 3.4,
},
)
self.custom = SEMGraph(
ebunch=[
("xi1", "eta1"),
("xi1", "y1"),
("xi1", "y4"),
("xi1", "x1"),
("xi1", "x2"),
("y4", "y1"),
("y1", "eta2"),
("eta2", "y5"),
("y1", "eta1"),
("eta1", "y2"),
("eta1", "y3"),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[("y1", "y2"), ("y2", "y3")],
err_var={},
)
def test_demo_init(self):
self.assertSetEqual(self.demo.latents, {"xi1", "eta1", "eta2"})
self.assertSetEqual(
self.demo.observed, {"x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"}
)
self.assertListEqual(
sorted(self.demo.graph.nodes()),
[
"eta1",
"eta2",
"x1",
"x2",
"x3",
"xi1",
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
],
)
self.assertListEqual(
sorted(self.demo.graph.edges()),
sorted(
[
("eta1", "eta2"),
("eta1", "y1"),
("eta1", "y2"),
("eta1", "y3"),
("eta1", "y4"),
("eta2", "y5"),
("eta2", "y6"),
("eta2", "y7"),
("eta2", "y8"),
("xi1", "eta1"),
("xi1", "eta2"),
("xi1", "x1"),
("xi1", "x2"),
("xi1", "x3"),
]
),
)
self.assertDictEqual(self.demo.graph.edges[("xi1", "x1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "x3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y1")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y3")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "y4")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("xi1", "eta2")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y5")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y6")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y7")], {"weight": np.NaN})
self.assertDictEqual(self.demo.graph.edges[("eta2", "y8")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
]
),
)
for edge in self.demo.err_graph.edges():
self.assertDictEqual(self.demo.err_graph.edges[edge], {"weight": np.NaN})
for node in self.demo.err_graph.nodes():
self.assertDictEqual(self.demo.err_graph.nodes[node], {"weight": np.NaN})
def test_union_init(self):
self.assertSetEqual(self.union.latents, set())
self.assertSetEqual(
self.union.observed, {"yrsmill", "unionsen", "age", "laboract", "deferenc"}
)
self.assertListEqual(
sorted(self.union.graph.nodes()),
sorted(["yrsmill", "unionsen", "age", "laboract", "deferenc"]),
)
self.assertListEqual(
sorted(self.union.graph.edges()),
sorted(
[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
]
),
)
self.assertDictEqual(self.union.graph.edges[("yrsmill", "unionsen")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("age", "laboract")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("age", "deferenc")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("deferenc", "laboract")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("deferenc", "unionsen")], {"weight": np.NaN})
self.assertDictEqual(self.union.graph.edges[("laboract", "unionsen")], {"weight": np.NaN})
npt.assert_equal(
nx.to_numpy_matrix(
self.union.err_graph, nodelist=sorted(self.union.err_graph.nodes()), weight=None
),
np.array(
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
]
),
)
for edge in self.union.err_graph.edges():
self.assertDictEqual(self.union.err_graph.edges[edge], {"weight": np.NaN})
for node in self.union.err_graph.nodes():
self.assertDictEqual(self.union.err_graph.nodes[node], {"weight": np.NaN})
def test_demo_param_init(self):
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x1")], {"weight": 0.4})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x2")], {"weight": 0.5})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "x3")], {"weight": 0.6})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "eta1")], {"weight": 0.3})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y1")], {"weight": 1.1})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y2")], {"weight": 1.2})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y3")], {"weight": 1.3})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "y4")], {"weight": 1.4})
self.assertDictEqual(self.demo_params.graph.edges[("eta1", "eta2")], {"weight": 0.1})
self.assertDictEqual(self.demo_params.graph.edges[("xi1", "eta2")], {"weight": 0.2})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y5")], {"weight": 0.7})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y6")], {"weight": 0.8})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y7")], {"weight": 0.9})
self.assertDictEqual(self.demo_params.graph.edges[("eta2", "y8")], {"weight": 1.0})
self.assertDictEqual(self.demo_params.err_graph.edges[("y1", "y5")], {"weight": 1.5})
self.assertDictEqual(self.demo_params.err_graph.edges[("y2", "y6")], {"weight": 1.6})
self.assertDictEqual(self.demo_params.err_graph.edges[("y2", "y4")], {"weight": 1.9})
self.assertDictEqual(self.demo_params.err_graph.edges[("y3", "y7")], {"weight": 1.7})
self.assertDictEqual(self.demo_params.err_graph.edges[("y4", "y8")], {"weight": 1.8})
self.assertDictEqual(self.demo_params.err_graph.edges[("y6", "y8")], {"weight": 2.0})
self.assertDictEqual(self.demo_params.err_graph.nodes["y1"], {"weight": 2.1})
self.assertDictEqual(self.demo_params.err_graph.nodes["y2"], {"weight": 2.2})
self.assertDictEqual(self.demo_params.err_graph.nodes["y3"], {"weight": 2.3})
self.assertDictEqual(self.demo_params.err_graph.nodes["y4"], {"weight": 2.4})
self.assertDictEqual(self.demo_params.err_graph.nodes["y5"], {"weight": 2.5})
self.assertDictEqual(self.demo_params.err_graph.nodes["y6"], {"weight": 2.6})
self.assertDictEqual(self.demo_params.err_graph.nodes["y7"], {"weight": 2.7})
self.assertDictEqual(self.demo_params.err_graph.nodes["y8"], {"weight": 2.8})
self.assertDictEqual(self.demo_params.err_graph.nodes["x1"], {"weight": 3.1})
self.assertDictEqual(self.demo_params.err_graph.nodes["x2"], {"weight": 3.2})
self.assertDictEqual(self.demo_params.err_graph.nodes["x3"], {"weight": 3.3})
self.assertDictEqual(self.demo_params.err_graph.nodes["eta1"], {"weight": 2.9})
self.assertDictEqual(self.demo_params.err_graph.nodes["eta2"], {"weight": 3.0})
def test_get_full_graph_struct(self):
full_struct = self.union._get_full_graph_struct()
self.assertFalse(
set(full_struct.nodes())
- set(
[
"yrsmill",
"unionsen",
"age",
"laboract",
"deferenc",
".yrsmill",
".unionsen",
".age",
".laboract",
".deferenc",
"..ageyrsmill",
"..yrsmillage",
]
)
)
self.assertFalse(
set(full_struct.edges())
- set(
[
("yrsmill", "unionsen"),
("age", "laboract"),
("age", "deferenc"),
("deferenc", "laboract"),
("deferenc", "unionsen"),
("laboract", "unionsen"),
(".yrsmill", "yrsmill"),
(".unionsen", "unionsen"),
(".age", "age"),
(".laboract", "laboract"),
(".deferenc", "deferenc"),
("..ageyrsmill", ".age"),
("..ageyrsmill", ".yrsmill"),
("..yrsmillage", ".age"),
("..yrsmillage", ".yrsmill"),
]
)
)
def test_active_trail_nodes(self):
demo_nodes = ["x1", "x2", "x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"]
for node in demo_nodes:
self.assertSetEqual(
self.demo.active_trail_nodes(node, struct="full")[node], set(demo_nodes)
)
union_nodes = self.union.graph.nodes()
active_trails = self.union.active_trail_nodes(list(union_nodes), struct="full")
for node in union_nodes:
self.assertSetEqual(active_trails[node], set(union_nodes))
self.assertSetEqual(
self.union.active_trail_nodes("age", observed=["laboract", "deferenc", "unionsen"])[
"age"
],
{"age", "yrsmill"},
)
def test_get_scaling_indicators(self):
demo_scaling_indicators = self.demo.get_scaling_indicators()
self.assertTrue(demo_scaling_indicators["eta1"] in ["y1", "y2", "y3", "y4"])
self.assertTrue(demo_scaling_indicators["eta2"] in ["y5", "y6", "y7", "y8"])
self.assertTrue(demo_scaling_indicators["xi1"] in ["x1", "x2", "x3"])
union_scaling_indicators = self.union.get_scaling_indicators()
self.assertDictEqual(union_scaling_indicators, dict())
custom_scaling_indicators = self.custom.get_scaling_indicators()
self.assertTrue(custom_scaling_indicators["xi1"] in ["x1", "x2", "y1", "y4"])
self.assertTrue(custom_scaling_indicators["eta1"] in ["y2", "y3"])
self.assertTrue(custom_scaling_indicators["eta2"] in ["y5"])
def test_to_lisrel(self):
demo = SEMGraph(
ebunch=[
("xi1", "x1", 1.000),
("xi1", "x2", 2.180),
("xi1", "x3", 1.819),
("xi1", "eta1", 1.483),
("eta1", "y1", 1.000),
("eta1", "y2", 1.257),
("eta1", "y3", 1.058),
("eta1", "y4", 1.265),
("eta1", "eta2", 0.837),
("xi1", "eta2", 0.572),
("eta2", "y5", 1.000),
("eta2", "y6", 1.186),
("eta2", "y7", 1.280),
("eta2", "y8", 1.266),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 0.624),
("y2", "y6", 2.153),
("y2", "y4", 1.313),
("y3", "y7", 0.795),
("y4", "y8", 0.348),
("y6", "y8", 1.356),
],
err_var={
"x1": 0.082,
"x2": 0.120,
"x3": 0.467,
"y1": 1.891,
"y2": 7.373,
"y3": 5.067,
"y4": 3.148,
"y5": 2.351,
"y6": 4.954,
"y7": 3.431,
"y8": 3.254,
"xi1": 0.448,
"eta1": 3.956,
"eta2": 0.172,
},
)
demo_lisrel = demo.to_lisrel()
indexing = []
vars_ordered = [
"y1",
"y2",
"y3",
"y4",
"y5",
"y6",
"y7",
"y8",
"x1",
"x2",
"x3",
"xi1",
"eta1",
"eta2",
]
for var in vars_ordered:
indexing.append(demo_lisrel.eta.index(var))
eta_reorder = [demo_lisrel.eta[i] for i in indexing]
B_reorder = demo_lisrel.B[indexing, :][:, indexing]
B_fixed_reorder = demo_lisrel.B_fixed_mask[indexing, :][:, indexing]
zeta_reorder = demo_lisrel.zeta[indexing, :][:, indexing]
zeta_fixed_reorder = demo_lisrel.zeta_fixed_mask[indexing, :][:, indexing]
wedge_y_reorder = demo_lisrel.wedge_y[:, indexing]
self.assertEqual(vars_ordered, eta_reorder)
npt.assert_array_equal(
B_reorder,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
]
),
)
npt.assert_array_equal(
zeta_reorder,
np.array(
[
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
]
),
)
npt.assert_array_equal(
B_fixed_reorder,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.257, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.058, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.265, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.186],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.280],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.266],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.000, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.180, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.819, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.483, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.572, 0.837, 0],
]
),
)
npt.assert_array_equal(
zeta_fixed_reorder,
np.array(
[
[1.891, 0, 0, 0, 0.624, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.373, 0, 1.313, 0, 2.153, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 5.067, 0, 0, 0, 0.795, 0, 0, 0, 0, 0, 0, 0],
[0, 1.313, 0, 3.148, 0, 0, 0, 0.348, 0, 0, 0, 0, 0, 0],
[0.624, 0, 0, 0, 2.351, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2.153, 0, 0, 0, 4.954, 0, 1.356, 0, 0, 0, 0, 0, 0],
[0, 0, 0.795, 0, 0, 0, 3.431, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.348, 0, 1.356, 0, 3.254, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0.082, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0.120, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.467, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.448, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.956, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.172],
]
),
)
npt.assert_array_equal(
demo_lisrel.wedge_y,
np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
]
),
)
def test_to_from_lisrel(self):
demo_lisrel = self.demo.to_lisrel()
union_lisrel = self.union.to_lisrel()
demo_params_lisrel = self.demo_params.to_lisrel()
custom_lisrel = self.custom.to_lisrel()
demo_graph = demo_lisrel.to_SEMGraph()
union_graph = union_lisrel.to_SEMGraph()
demo_params_graph = demo_params_lisrel.to_SEMGraph()
custom_graph = custom_lisrel.to_SEMGraph()
# Test demo
self.assertSetEqual(set(self.demo.graph.nodes()), set(demo_graph.graph.nodes()))
self.assertSetEqual(set(self.demo.graph.edges()), set(demo_graph.graph.edges()))
self.assertSetEqual(set(self.demo.err_graph.nodes()), set(demo_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(self.demo.err_graph, nodelist=sorted(self.demo.err_graph.nodes())),
nx.to_numpy_matrix(demo_graph, nodelist=sorted(demo_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.demo.full_graph_struct.nodes()), set(demo_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.demo.full_graph_struct.edges()), set(demo_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.demo.latents, demo_graph.latents)
self.assertSetEqual(self.demo.observed, demo_graph.observed)
# Test union
self.assertSetEqual(set(self.union.graph.nodes()), set(union_graph.graph.nodes()))
self.assertSetEqual(set(self.union.graph.edges()), set(union_graph.graph.edges()))
self.assertSetEqual(set(self.union.err_graph.nodes()), set(union_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(self.union.err_graph, nodelist=sorted(self.union.err_graph.nodes())),
nx.to_numpy_matrix(union_graph, nodelist=sorted(union_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.union.full_graph_struct.nodes()), set(union_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.union.full_graph_struct.edges()), set(union_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.union.latents, union_graph.latents)
self.assertSetEqual(self.union.observed, union_graph.observed)
# Test demo_params
self.assertSetEqual(
set(self.demo_params.graph.nodes()), set(demo_params_graph.graph.nodes())
)
self.assertSetEqual(
set(self.demo_params.graph.edges()), set(demo_params_graph.graph.edges())
)
self.assertSetEqual(
set(self.demo_params.err_graph.nodes()), set(demo_params_graph.err_graph.nodes())
)
npt.assert_array_equal(
nx.to_numpy_matrix(
self.demo_params.err_graph,
nodelist=sorted(self.demo_params.err_graph.nodes()),
weight=None,
),
nx.to_numpy_matrix(
demo_graph.err_graph,
nodelist=sorted(demo_params_graph.err_graph.nodes()),
weight=None,
),
)
self.assertSetEqual(
set(self.demo_params.full_graph_struct.nodes()),
set(demo_params_graph.full_graph_struct.nodes()),
)
self.assertSetEqual(
set(self.demo_params.full_graph_struct.edges()),
set(demo_params_graph.full_graph_struct.edges()),
)
self.assertSetEqual(self.demo_params.latents, demo_params_graph.latents)
self.assertSetEqual(self.demo_params.observed, demo_params_graph.observed)
        # Test custom
self.assertSetEqual(set(self.custom.graph.nodes()), set(custom_graph.graph.nodes()))
self.assertSetEqual(set(self.custom.graph.edges()), set(custom_graph.graph.edges()))
self.assertSetEqual(set(self.custom.err_graph.nodes()), set(custom_graph.err_graph.nodes()))
npt.assert_array_equal(
nx.to_numpy_matrix(
self.custom.err_graph, nodelist=sorted(self.custom.err_graph.nodes())
),
nx.to_numpy_matrix(custom_graph, nodelist=sorted(custom_graph.err_graph.nodes())),
)
self.assertSetEqual(
set(self.custom.full_graph_struct.nodes()), set(custom_graph.full_graph_struct.nodes())
)
self.assertSetEqual(
set(self.custom.full_graph_struct.edges()), set(custom_graph.full_graph_struct.edges())
)
self.assertSetEqual(self.custom.latents, custom_graph.latents)
self.assertSetEqual(self.custom.observed, custom_graph.observed)
def test_iv_transformations_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertRaises(ValueError, self.demo._iv_transformations, "x1", "y1", scale)
for y in ["y2", "y3", "y4"]:
full_graph, dependent_var = self.demo._iv_transformations(
X="eta1", Y=y, scaling_indicators=scale
)
self.assertEqual(dependent_var, y)
self.assertTrue((".y1", y) in full_graph.edges)
self.assertFalse(("eta1", y) in full_graph.edges)
for y in ["y6", "y7", "y8"]:
full_graph, dependent_var = self.demo._iv_transformations(
X="eta2", Y=y, scaling_indicators=scale
)
self.assertEqual(dependent_var, y)
self.assertTrue((".y5", y) in full_graph.edges)
self.assertFalse(("eta2", y) in full_graph.edges)
full_graph, dependent_var = self.demo._iv_transformations(
X="xi1", Y="eta1", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y1")
self.assertTrue((".eta1", "y1") in full_graph.edges())
self.assertTrue((".x1", "y1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, dependent_var = self.demo._iv_transformations(
X="xi1", Y="eta2", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y5")
self.assertTrue((".y1", "y5") in full_graph.edges())
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertTrue((".x1", "y5") in full_graph.edges())
self.assertFalse(("eta1", "eta2") in full_graph.edges())
self.assertFalse(("xi1", "eta2") in full_graph.edges())
full_graph, dependent_var = self.demo._iv_transformations(
X="eta1", Y="eta2", scaling_indicators=scale
)
self.assertEqual(dependent_var, "y5")
self.assertTrue((".y1", "y5") in full_graph.edges())
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertTrue((".x1", "y5") in full_graph.edges())
self.assertFalse(("eta1", "eta2") in full_graph.edges())
self.assertFalse(("xi1", "eta2") in full_graph.edges())
def test_iv_transformations_union(self):
scale = {}
for u, v in self.union.graph.edges():
full_graph, dependent_var = self.union._iv_transformations(
u, v, scaling_indicators=scale
)
self.assertFalse((u, v) in full_graph.edges())
self.assertEqual(dependent_var, v)
def test_get_ivs_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertSetEqual(
self.demo.get_ivs("eta1", "y2", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "y3", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y4", "y6", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "y4", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y6", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y6", scaling_indicators=scale),
{"x1", "x2", "x3", "y3", "y4", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y7", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y4", "y6", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("eta2", "y8", scaling_indicators=scale),
{"x1", "x2", "x3", "y2", "y3", "y7"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "x2", scaling_indicators=scale),
{"x3", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "x3", scaling_indicators=scale),
{"x2", "y1", "y2", "y3", "y4", "y5", "y6", "y7", "y8"},
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "eta1", scaling_indicators=scale), {"x2", "x3"}
)
self.assertSetEqual(
self.demo.get_ivs("xi1", "eta2", scaling_indicators=scale),
{"x2", "x3", "y2", "y3", "y4"},
)
self.assertSetEqual(
self.demo.get_ivs("eta1", "eta2", scaling_indicators=scale),
{"x2", "x3", "y2", "y3", "y4"},
)
def test_get_conditional_ivs_demo(self):
scale = {"eta1": "y1", "eta2": "y5", "xi1": "x1"}
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y2", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y3", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta1", "y4", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y6", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y7", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("eta2", "y8", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "x2", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "x3", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "eta1", scaling_indicators=scale), [])
self.assertEqual(self.demo.get_conditional_ivs("xi1", "eta2", scaling_indicators=scale), [])
self.assertEqual(
self.demo.get_conditional_ivs("eta1", "eta2", scaling_indicators=scale), []
)
def test_get_ivs_union(self):
scale = {}
self.assertSetEqual(
self.union.get_ivs("yrsmill", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("deferenc", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("laboract", "unionsen", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("deferenc", "laboract", scaling_indicators=scale), set()
)
self.assertSetEqual(
self.union.get_ivs("age", "laboract", scaling_indicators=scale), {"yrsmill"}
)
self.assertSetEqual(
self.union.get_ivs("age", "deferenc", scaling_indicators=scale), {"yrsmill"}
)
def test_get_conditional_ivs_union(self):
self.assertEqual(
self.union.get_conditional_ivs("yrsmill", "unionsen"),
[("age", {"laboract", "deferenc"})],
)
        # This case wouldn't have a conditional IV if the total effect between `deferenc` and
        # `unionsen` needed to be computed, because one of the conditioning variables lies on the
        # effect path.
self.assertEqual(
self.union.get_conditional_ivs("deferenc", "unionsen"),
[("age", {"yrsmill", "laboract"})],
)
self.assertEqual(
self.union.get_conditional_ivs("laboract", "unionsen"),
[("age", {"yrsmill", "deferenc"})],
)
self.assertEqual(self.union.get_conditional_ivs("deferenc", "laboract"), [])
self.assertEqual(
self.union.get_conditional_ivs("age", "laboract"), [("yrsmill", {"deferenc"})]
)
self.assertEqual(self.union.get_conditional_ivs("age", "deferenc"), [])
def test_iv_transformations_custom(self):
scale_custom = {"eta1": "y2", "eta2": "y5", "xi1": "x1"}
full_graph, var = self.custom._iv_transformations(
"xi1", "x2", scaling_indicators=scale_custom
)
self.assertEqual(var, "x2")
self.assertTrue((".x1", "x2") in full_graph.edges())
self.assertFalse(("xi1", "x2") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "y4", scaling_indicators=scale_custom
)
self.assertEqual(var, "y4")
self.assertTrue((".x1", "y4") in full_graph.edges())
self.assertFalse(("xi1", "y4") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "y1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y1")
self.assertTrue((".x1", "y1") in full_graph.edges())
self.assertFalse(("xi1", "y1") in full_graph.edges())
self.assertFalse(("y4", "y1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"xi1", "eta1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y2")
self.assertTrue((".eta1", "y2") in full_graph.edges())
self.assertTrue((".x1", "y2") in full_graph.edges())
self.assertFalse(("y1", "eta1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y1", "eta1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y2")
self.assertTrue((".eta1", "y2") in full_graph.edges())
self.assertTrue((".x1", "y2") in full_graph.edges())
self.assertFalse(("y1", "eta1") in full_graph.edges())
self.assertFalse(("xi1", "eta1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y1", "eta2", scaling_indicators=scale_custom
)
self.assertEqual(var, "y5")
self.assertTrue((".eta2", "y5") in full_graph.edges())
self.assertFalse(("y1", "eta2") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"y4", "y1", scaling_indicators=scale_custom
)
self.assertEqual(var, "y1")
self.assertFalse(("y4", "y1") in full_graph.edges())
full_graph, var = self.custom._iv_transformations(
"eta1", "y3", scaling_indicators=scale_custom
)
self.assertEqual(var, "y3")
self.assertTrue((".y2", "y3") in full_graph.edges())
self.assertFalse(("eta1", "y3") in full_graph.edges())
def test_get_ivs_custom(self):
scale_custom = {"eta1": "y2", "eta2": "y5", "xi1": "x1"}
self.assertSetEqual(
self.custom.get_ivs("xi1", "x2", scaling_indicators=scale_custom),
{"y1", "y2", "y3", "y4", "y5"},
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "y4", scaling_indicators=scale_custom), {"x2"}
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "y1", scaling_indicators=scale_custom), {"x2", "y4"}
)
self.assertSetEqual(
self.custom.get_ivs("xi1", "eta1", scaling_indicators=scale_custom), {"x2", "y4"}
)
# TODO: Test this and fix.
self.assertSetEqual(
self.custom.get_ivs("y1", "eta1", scaling_indicators=scale_custom), {"x2", "y4", "y5"}
)
self.assertSetEqual(
self.custom.get_ivs("y1", "eta2", scaling_indicators=scale_custom),
{"x1", "x2", "y2", "y3", "y4"},
)
self.assertSetEqual(self.custom.get_ivs("y4", "y1", scaling_indicators=scale_custom), set())
self.assertSetEqual(
self.custom.get_ivs("eta1", "y3", scaling_indicators=scale_custom), {"x1", "x2", "y4"}
)
def test_small_model_ivs(self):
model1 = SEMGraph(
ebunch=[("X", "Y"), ("I", "X"), ("W", "I")],
latents=[],
err_corr=[("W", "Y")],
err_var={},
)
self.assertEqual(model1.get_conditional_ivs("X", "Y"), [("I", {"W"})])
model2 = SEMGraph(
ebunch=[("x", "y"), ("z", "x"), ("w", "z"), ("w", "u"), ("u", "x"), ("u", "y")],
latents=["u"],
)
self.assertEqual(model2.get_conditional_ivs("x", "y"), [("z", {"w"})])
model3 = SEMGraph(ebunch=[("x", "y"), ("u", "x"), ("u", "y"), ("z", "x")], latents=["u"])
self.assertEqual(model3.get_ivs("x", "y"), {"z"})
model4 = SEMGraph(ebunch=[("x", "y"), ("z", "x"), ("u", "x"), ("u", "y")])
self.assertEqual(model4.get_conditional_ivs("x", "y"), [("z", {"u"})])
class TestSEMAlg(unittest.TestCase):
def setUp(self):
self.demo = SEMGraph(
ebunch=[
("xi1", "x1", 1.000),
("xi1", "x2", 2.180),
("xi1", "x3", 1.819),
("xi1", "eta1", 1.483),
("eta1", "y1", 1.000),
("eta1", "y2", 1.257),
("eta1", "y3", 1.058),
("eta1", "y4", 1.265),
("eta1", "eta2", 0.837),
("xi1", "eta2", 0.572),
("eta2", "y5", 1.000),
("eta2", "y6", 1.186),
("eta2", "y7", 1.280),
("eta2", "y8", 1.266),
],
latents=["xi1", "eta1", "eta2"],
err_corr=[
("y1", "y5", 0.624),
("y2", "y6", 2.153),
("y2", "y4", 1.313),
("y3", "y7", 0.795),
("y4", "y8", 0.348),
("y6", "y8", 1.356),
],
err_var={
"x1": 0.082,
"x2": 0.120,
"x3": 0.467,
"y1": 1.891,
"y2": 7.373,
"y3": 5.067,
"y4": 3.148,
"y5": 2.351,
"y6": 4.954,
"y7": 3.431,
"y8": 3.254,
"xi1": 0.448,
"eta1": 3.956,
"eta2": 0.172,
},
)
self.demo_lisrel = self.demo.to_lisrel()
self.small_model = SEM.from_graph(
ebunch=[("X", "Y", 0.3)], latents=[], err_var={"X": 0.1, "Y": 0.1}
)
self.small_model_lisrel = self.small_model.to_lisrel()
def test_generate_samples(self):
samples = self.small_model_lisrel.generate_samples(n_samples=100)
samples = self.demo_lisrel.generate_samples(n_samples=100)
| 41.536667 | 100 | 0.455862 | 49,691 | 0.99693 | 0 | 0 | 0 | 0 | 0 | 0 | 7,272 | 0.145895 |
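# Editor's note (illustrative sketch, not part of the dataset row above): the tests above
# exercise pgmpy's SEMGraph API. A minimal usage example, assuming pgmpy with its SEM module
# is installed, mirrors the `union` fixture and the conditional-IV query asserted in
# test_get_conditional_ivs_union.
from pgmpy.models import SEMGraph

union = SEMGraph(
    ebunch=[
        ("yrsmill", "unionsen"),
        ("age", "laboract"),
        ("age", "deferenc"),
        ("deferenc", "laboract"),
        ("deferenc", "unionsen"),
        ("laboract", "unionsen"),
    ],
    latents=[],
    err_corr=[("yrsmill", "age")],
)

# Conditional instrumental variables for the yrsmill -> unionsen effect.
print(union.get_conditional_ivs("yrsmill", "unionsen"))
# Expected per the test above: [("age", {"laboract", "deferenc"})]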
404d173b85da7aa2302b72d549875f4086a67bcc | 1,790 | py | Python | data_scripts/translation.py | wangcongcong123/transection | 3b931ce09c9b5e03ec6afdea6f58a317ad07361b | [
"MIT"
]
| 4 | 2021-01-11T06:21:27.000Z | 2021-12-19T17:49:07.000Z | data_scripts/translation.py | wangcongcong123/transection | 3b931ce09c9b5e03ec6afdea6f58a317ad07361b | [
"MIT"
]
| null | null | null | data_scripts/translation.py | wangcongcong123/transection | 3b931ce09c9b5e03ec6afdea6f58a317ad07361b | [
"MIT"
]
| 2 | 2021-01-21T02:48:49.000Z | 2021-03-19T09:45:52.000Z | # coding=utf-8
# This script is finished following HF's datasets' template:
# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
# More examples as references to write a customized dataset can be found here:
# https://github.com/huggingface/datasets/tree/master/datasets
from __future__ import absolute_import, division, print_function
import json
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_TRAIN_DOWNLOAD_URL = "data/train.json"
_VAL_DOWNLOAD_URL = "data/val.json"
class Translation(datasets.GeneratorBasedBuilder):
"""customize dataset."""
# VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"source": datasets.Value("string"),
"target": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="#",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
val_path = dl_manager.download_and_extract(_VAL_DOWNLOAD_URL)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
]
def _generate_examples(self, filepath):
with open(filepath, encoding='utf-8') as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"source": data["english"],
"target": data["chinese"],
}
| 32.545455 | 103 | 0.622346 | 1,264 | 0.706145 | 313 | 0.17486 | 0 | 0 | 0 | 0 | 505 | 0.282123 |
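# Editor's note (illustrative sketch, not part of the dataset row above): a loading script
# like the Translation builder above is typically consumed through datasets.load_dataset.
# The script path and the JSON-lines files under data/ (one object per line with "english"
# and "chinese" keys) are assumptions taken from the loader code shown above.
from datasets import load_dataset

ds = load_dataset("data_scripts/translation.py")  # path to the builder script
print(ds["train"][0])             # e.g. {"source": "<english sentence>", "target": "<chinese sentence>"}
print(ds["validation"].num_rows)  # size of the validation split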
404f20db207c728bba35266d11df1248aa4d138a | 7,941 | py | Python | utils/chat_formatting.py | lyricalpaws/snekbot | 704197777dbaa284d163a95642e224d6efe2c4b2 | [
"MIT"
]
| 13 | 2018-11-26T15:55:28.000Z | 2022-02-05T16:07:02.000Z | utils/chat_formatting.py | lyricalpaws/snekbot | 704197777dbaa284d163a95642e224d6efe2c4b2 | [
"MIT"
]
| 8 | 2018-11-12T19:04:01.000Z | 2018-11-23T15:11:55.000Z | utils/chat_formatting.py | lyricalpaws/snekbot | 704197777dbaa284d163a95642e224d6efe2c4b2 | [
"MIT"
]
| 23 | 2019-01-01T23:53:37.000Z | 2022-03-12T14:52:45.000Z | import itertools
from typing import Sequence, Iterator
# Source: https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/utils/chat_formatting.py
def error(text: str) -> str:
"""Get text prefixed with an error emoji.
Returns
-------
str
The new message.
"""
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text: str) -> str:
"""Get text prefixed with a warning emoji.
Returns
-------
str
The new message.
"""
return "\N{WARNING SIGN} {}".format(text)
def info(text: str) -> str:
"""Get text prefixed with an info emoji.
Returns
-------
str
The new message.
"""
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text: str) -> str:
"""Get text prefixed with a question emoji.
Returns
-------
str
The new message.
"""
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text: str) -> str:
"""Get the given text in bold.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "**{}**".format(text)
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text: str) -> str:
"""Get the given text as inline code.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "`{}`".format(text)
def italics(text: str) -> str:
"""Get the given text in italics.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "*{}*".format(text)
def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
"""Get two blocks of text in a borders.
Note
----
This will only work with a monospaced font.
Parameters
----------
*columns : `sequence` of `str`
The columns of text, each being a list of lines in that column.
ascii_border : bool
Whether or not the border should be pure ASCII.
Returns
-------
str
The bordered text.
"""
borders = {
"TL": "-" if ascii_border else "┌", # Top-left
"TR": "-" if ascii_border else "┐", # Top-right
"BL": "-" if ascii_border else "└", # Bottom-left
"BR": "-" if ascii_border else "┘", # Bottom-right
"HZ": "-" if ascii_border else "─", # Horizontal
"VT": "|" if ascii_border else "│", # Vertical
}
sep = " " * 4 # Separator between boxes
widths = tuple(
max(len(row) for row in column) + 9 for column in columns
) # width of each col
colsdone = [False] * len(columns) # whether or not each column is done
lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]
for line in itertools.zip_longest(*columns):
row = []
for colidx, column in enumerate(line):
width = widths[colidx]
done = colsdone[colidx]
if column is None:
if not done:
# bottom border of column
column = "{HZ}" * width
row.append("{BL}" + column + "{BR}")
colsdone[colidx] = True # mark column as done
else:
# leave empty
row.append(" " * (width + 2))
else:
column += " " * (width - len(column)) # append padded spaces
row.append("{VT}" + column + "{VT}")
lines.append(sep.join(row))
final_row = []
for width, done in zip(widths, colsdone):
if not done:
final_row.append("{BL}" + "{HZ}" * width + "{BR}")
else:
final_row.append(" " * (width + 2))
lines.append(sep.join(final_row))
return "\n".join(lines).format(**borders)
def pagify(
text: str,
delims: Sequence[str] = ["\n"],
*,
priority: bool = False,
escape_mass_mentions: bool = True,
shorten_by: int = 8,
page_length: int = 2000
) -> Iterator[str]:
"""Generate multiple pages from the given text.
Note
----
This does not respect code blocks or inline code.
Parameters
----------
text : str
The content to pagify and send.
delims : `sequence` of `str`, optional
Characters where page breaks will occur. If no delimiters are found
in a page, the page will break after ``page_length`` characters.
By default this only contains the newline.
Other Parameters
----------------
priority : `bool`
Set to :code:`True` to choose the page break delimiter based on the
order of ``delims``. Otherwise, the page will always break at the
last possible delimiter.
escape_mass_mentions : `bool`
If :code:`True`, any mass mentions (here or everyone) will be
silenced.
shorten_by : `int`
How much to shorten each page by. Defaults to 8.
page_length : `int`
The maximum length of each page. Defaults to 2000.
Yields
------
`str`
Pages of the given text.
"""
in_text = text
page_length -= shorten_by
while len(in_text) > page_length:
this_page_len = page_length
if escape_mass_mentions:
this_page_len -= in_text.count("@here", 0, page_length) + in_text.count(
"@everyone", 0, page_length
)
closest_delim = (in_text.rfind(d, 1, this_page_len) for d in delims)
if priority:
closest_delim = next((x for x in closest_delim if x > 0), -1)
else:
closest_delim = max(closest_delim)
closest_delim = closest_delim if closest_delim != -1 else this_page_len
if escape_mass_mentions:
to_send = escape(in_text[:closest_delim], mass_mentions=True)
else:
to_send = in_text[:closest_delim]
        if to_send.strip():
yield to_send
in_text = in_text[closest_delim:]
    if in_text.strip():
if escape_mass_mentions:
yield escape(in_text, mass_mentions=True)
else:
yield in_text
def strikethrough(text: str) -> str:
"""Get the given text with a strikethrough.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "~~{}~~".format(text)
def underline(text: str) -> str:
"""Get the given text with an underline.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "__{}__".format(text)
def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
"""Get text with all mass mentions or markdown escaped.
Parameters
----------
text : str
The text to be escaped.
mass_mentions : `bool`, optional
Set to :code:`True` to escape mass mentions in the text.
formatting : `bool`, optional
        Set to :code:`True` to escape any markdown formatting in the text.
Returns
-------
str
The escaped text.
"""
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = (
text.replace("`", "\\`")
.replace("*", "\\*")
.replace("_", "\\_")
.replace("~", "\\~")
)
return text
| 27.28866 | 109 | 0.546783 | 0 | 0 | 2,301 | 0.289325 | 0 | 0 | 0 | 0 | 4,223 | 0.530995 |
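# Editor's note (illustrative sketch, not part of the dataset row above): typical use of the
# helpers above when sending long text through a Discord bot. `send_long` and `channel` are
# hypothetical names; pagify, box and escape are the functions defined above, and the import
# path is assumed from the repo layout (utils/chat_formatting.py).
from utils.chat_formatting import pagify, box, escape

async def send_long(channel, text: str) -> None:
    safe = escape(text, mass_mentions=True)            # silence @everyone/@here
    for page in pagify(safe, delims=["\n"], page_length=1990):
        await channel.send(box(page))                  # each boxed page stays under Discord's 2000-char limit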
404ff68f947024e93fe50b765fa029be24f36c84 | 35,410 | py | Python | strategy/trade/strategymargintrade.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
]
| null | null | null | strategy/trade/strategymargintrade.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
]
| null | null | null | strategy/trade/strategymargintrade.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
]
| null | null | null | # @date 2018-12-28
# @author Frederic Scherma, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Strategy trade for margin with multiple positions.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
from trader.trader import Trader
from instrument.instrument import Instrument
from strategy.strategytrader import StrategyTrader
from strategy.strategytradercontext import StrategyTraderContextBuilder
from common.signal import Signal
from trader.order import Order
from .strategytrade import StrategyTrade
import logging
logger = logging.getLogger('siis.strategy.margintrade')
class StrategyMarginTrade(StrategyTrade):
"""
Specialization for margin trading.
    This type of trade relates to margin trading markets, with or without hedging support. There is a
    position identifier per trade, but positions are generally taken in the same direction (no hedging).
Works with crypto margin brokers (kraken...).
    @todo Do we need, as with asset trades, an exit_trades list to compute the axp and x values?
    Relying on cumulative-filled and avg-price raises the same problem here too.
    @todo Check the position_updated quantity against the direction, or make sure the trade signals
    distinguish entries from exits.
@todo fees and commissions
"""
__slots__ = 'create_ref_oid', 'stop_ref_oid', 'limit_ref_oid', 'create_oid', 'stop_oid', 'limit_oid', \
'position_id', 'leverage', 'stop_order_qty', 'limit_order_qty'
def __init__(self, timeframe: float):
super().__init__(StrategyTrade.TRADE_MARGIN, timeframe)
self.create_ref_oid = None
self.stop_ref_oid = None
self.limit_ref_oid = None
self.create_oid = None # related entry order id
self.stop_oid = None # related stop order id
self.limit_oid = None # related limit order id
self.position_id = None # related informal position id
self.leverage = 1.0
self.stop_order_qty = 0.0 # if stop_oid then this is the qty placed on the stop order
self.limit_order_qty = 0.0 # if limit_oid then this is the qty placed on the limit order
def open(self, trader: Trader, instrument: Instrument, direction: int, order_type: int,
order_price: float, quantity: float, take_profit: float, stop_loss: float,
leverage: float = 1.0, hedging: Optional[bool] = None) -> bool:
"""
Open a position or buy an asset.
"""
if self._entry_state != StrategyTrade.STATE_NEW:
return False
order = Order(trader, instrument.market_id)
order.direction = direction
order.price = order_price
order.order_type = order_type
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = leverage
if hedging:
order.hedging = hedging
# generated a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.dir = order.direction
self.op = order.price # retains the order price
self.oq = order.quantity # ordered quantity
self.tp = take_profit
self.sl = stop_loss
self.leverage = leverage
self._stats['entry-order-type'] = order.order_type
if trader.create_order(order, instrument) > 0:
# keep the related create position identifier if available
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
def reopen(self, trader: Trader, instrument: Instrument, quantity: float) -> bool:
if self._entry_state != StrategyTrade.STATE_CANCELED:
return False
# reset
self._entry_state = StrategyTrade.STATE_NEW
self.eot = 0
order = Order(trader, instrument.market_id)
order.direction = self.dir
order.price = self.op
order.order_type = self._stats['entry-order-type']
order.quantity = quantity
order.post_only = False
order.margin_trade = True
order.leverage = self.leverage
# generated a reference order id
trader.set_ref_order_id(order)
self.create_ref_oid = order.ref_order_id
self.oq = order.quantity # ordered quantity
if trader.create_order(order, instrument) > 0:
self.create_oid = order.order_id
self.position_id = order.position_id
if not self.eot and order.created_time:
# only at the first open
self.eot = order.created_time
return True
else:
self._entry_state = StrategyTrade.STATE_REJECTED
return False
def remove(self, trader: Trader, instrument: Instrument) -> int:
"""
Remove the orders, but doesn't close the position.
"""
error = False
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
# no entry qty processed, entry canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# cancel a partially filled trade means it is then fully filled
self._entry_state = StrategyTrade.STATE_FILLED
else:
error = True
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
if self.e <= 0 and self.x <= 0:
# no exit qty
self._exit_state = StrategyTrade.STATE_CANCELED
elif self.x >= self.e:
self._exit_state = StrategyTrade.STATE_FILLED
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
else:
error = True
return not error
def cancel_open(self, trader: Trader, instrument: Instrument) -> int:
if self.create_oid:
# cancel the buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
if self.e <= 0:
# cancel a just opened trade means it is canceled
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# cancel a partially filled trade means it is then fully filled
self._entry_state = StrategyTrade.STATE_FILLED
return self.ACCEPTED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order, nothing to do
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
# exists, do nothing need to retry
return self.ERROR
return self.NOTHING_TO_DO
def modify_take_profit(self, trader: Trader, instrument: Instrument, limit_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.limit_oid:
# cancel the limit order and create a new one
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all entry qty is filled, if lesser something wrong but its ok
return self.NOTHING_TO_DO
if limit_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_LIMIT
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.price = limit_price
order.margin_trade = True
order.leverage = self.leverage
trader.set_ref_order_id(order)
self.limit_ref_oid = order.ref_order_id
self._stats['take-profit-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.limit_oid = order.order_id
self.limit_order_qty = order.quantity
self.last_tp_ot[0] = order.created_time
self.last_tp_ot[1] += 1
self.tp = limit_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.limit_ref_oid = None
self.limit_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.limit_ref_oid = None
self.limit_order_qty = 0.0
return self.REJECTED
elif limit_price:
# soft take-profit
self.tp = limit_price
else:
# remove take-profit
self.tp = 0.0
return self.NOTHING_TO_DO
def modify_stop_loss(self, trader: Trader, instrument: Instrument, stop_price: float, hard: bool = True) -> int:
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self._exit_state == StrategyTrade.STATE_FILLED:
# exit already fully filled
return self.NOTHING_TO_DO
if self.stop_oid:
# cancel the stop order and create a new one
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all entry qty is filled, if lesser something wrong but its ok
return self.NOTHING_TO_DO
if stop_price and hard:
# only if filled entry partially or totally
order = Order(trader, instrument.market_id)
order.direction = -self.direction
order.order_type = Order.ORDER_STOP
order.reduce_only = True
order.quantity = self.e - self.x # remaining
order.stop_price = stop_price
order.leverage = self.leverage
order.margin_trade = True
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
self.last_stop_ot[0] = order.created_time
self.last_stop_ot[1] += 1
self.sl = stop_price
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
elif stop_price:
# soft stop-loss
self.sl = stop_price
else:
# remove stop-loss
self.sl = 0.0
return self.NOTHING_TO_DO
def close(self, trader: Trader, instrument: Instrument) -> int:
"""
Close the position and cancel the related orders.
"""
if self._closing:
# already closing order
return self.NOTHING_TO_DO
if self.create_oid:
# cancel the remaining buy order
if trader.cancel_order(self.create_oid, instrument) > 0:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
else:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no create order
self.create_ref_oid = None
self.create_oid = None
else:
return self.ERROR
if self.stop_oid:
# cancel the stop order
if trader.cancel_order(self.stop_oid, instrument) > 0:
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no stop order
self.stop_ref_oid = None
self.stop_oid = None
self.stop_order_qty = 0.0
else:
return self.ERROR
if self.limit_oid:
# cancel the limit order
if trader.cancel_order(self.limit_oid, instrument) > 0:
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing need retry
return self.ERROR
elif data['id'] is None:
# cannot retrieve the order, wrong id, no limit order
self.limit_ref_oid = None
self.limit_oid = None
self.limit_order_qty = 0.0
else:
return self.ERROR
if self.x >= self.e:
# all qty is filled
return self.NOTHING_TO_DO
order = Order(trader, instrument.market_id)
order.direction = -self.dir # neg dir
order.order_type = Order.ORDER_MARKET
order.reduce_only = True
order.quantity = self.e - self.x # remaining qty
order.margin_trade = True
order.leverage = self.leverage
# generated a reference order id
trader.set_ref_order_id(order)
self.stop_ref_oid = order.ref_order_id
self._stats['stop-order-type'] = order.order_type
create_order_result = trader.create_order(order, instrument)
if create_order_result > 0:
self.stop_oid = order.order_id
self.stop_order_qty = order.quantity
# closing order defined
self._closing = True
return self.ACCEPTED
elif create_order_result == Order.REASON_INSUFFICIENT_MARGIN:
# rejected because not enough margin, must stop to retry
self.stop_ref_oid = None
self.stop_order_qty = 0.0
self._exit_state = self.STATE_ERROR
return self.INSUFFICIENT_MARGIN
else:
self.stop_ref_oid = None
self.stop_order_qty = 0.0
return self.REJECTED
def has_stop_order(self) -> bool:
return self.stop_oid is not None and self.stop_oid != ""
def has_limit_order(self) -> bool:
return self.limit_oid is not None and self.limit_oid != ""
def support_both_order(self) -> bool:
return True
@classmethod
def is_margin(cls) -> bool:
return True
@classmethod
def is_spot(cls) -> bool:
return False
#
# signal
#
def order_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_ORDER_OPENED:
# already get at the return of create_order
if ref_order_id == self.create_ref_oid:
self.create_oid = data['id']
# init created timestamp at the create order open
if not self.eot:
self.eot = data['timestamp']
if data.get('stop-loss'):
self.sl = data['stop-loss']
if data.get('take-profit'):
self.tp = data['take-profit']
self._entry_state = StrategyTrade.STATE_OPENED
elif ref_order_id == self.stop_ref_oid:
self.stop_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif ref_order_id == self.limit_ref_oid:
self.limit_oid = data['id']
if not self.xot:
self.xot = data['timestamp']
elif signal_type == Signal.SIGNAL_ORDER_DELETED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_DELETED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_CANCELED:
# order is no longer active
if data == self.create_oid:
self.create_ref_oid = None
self.create_oid = None
self._entry_state = StrategyTrade.STATE_CANCELED
elif data == self.limit_oid:
self.limit_ref_oid = None
self.limit_oid = None
elif data == self.stop_oid:
self.stop_ref_oid = None
self.stop_oid = None
elif signal_type == Signal.SIGNAL_ORDER_UPDATED:
# order price/qty modified; this cannot really be used because the strategy
# might cancel the trade or create another one.
# for the qty we could keep a remaining_qty member and compare against it
pass
elif signal_type == Signal.SIGNAL_ORDER_TRADED:
# order fully or partially filled
filled = 0
if data['id'] == self.create_oid:
prev_e = self.e
# a single order for the entry, so it's OK and preferred to use cumulative-filled and avg-price
# because the precision comes from the broker
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.e # compute filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# in that case we have avg-price already computed
self.aep = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# compute the average price
self.aep = ((self.aep * self.e) + (data['exec-price'] * filled)) / (self.e + filled)
else:
# no price available, fall back to the order price
self.aep = self.op
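# Illustrative example (values are hypothetical, not from the source): with
# aep=100.0 and e=1.0, a new fill of 0.5 at exec-price=104.0 gives
# aep = (100.0*1.0 + 104.0*0.5) / (1.0 + 0.5) ~= 101.33, i.e. a quantity
# weighted average of the entry executions.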
# cumulative filled entry qty
if data.get('cumulative-filled') is not None:
self.e = data.get('cumulative-filled')
elif filled > 0:
self.e = instrument.adjust_quantity(self.e + filled)
if filled > 0:
# probably need to update exit orders
self._dirty = True
logger.info("Entry avg-price=%s cum-filled=%s" % (self.aep, self.e))
if self.e >= self.oq:
self._entry_state = StrategyTrade.STATE_FILLED
# if no ORDER_DELETED signal is sent, clean up here
self.create_oid = None
self.create_ref_oid = None
else:
self._entry_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-entry-timestamp']:
self._stats['first-realized-entry-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-entry-timestamp'] = data.get('timestamp', 0.0)
elif data['id'] == self.limit_oid or data['id'] == self.stop_oid:
prev_x = self.x
# either we have 'filled' component (partial qty) or the 'cumulative-filled' or both
if data.get('cumulative-filled') is not None and data['cumulative-filled'] > 0:
filled = data['cumulative-filled'] - self.x # computed filled qty
elif data.get('filled') is not None and data['filled'] > 0:
filled = data['filled']
else:
filled = 0
if data.get('avg-price') is not None and data['avg-price'] > 0:
# recompute profit-loss
if self.dir > 0:
self.pl = (data['avg-price'] - self.aep) / self.aep
elif self.dir < 0:
self.pl = (self.aep - data['avg-price']) / self.aep
# in that case we have avg-price already computed
self.axp = data['avg-price']
elif data.get('exec-price') is not None and data['exec-price'] > 0:
# increase/decrease profit/loss (over entry executed quantity)
if self.dir > 0:
self.pl += ((data['exec-price'] * filled) - (self.aep * filled)) / (self.aep * self.e)
elif self.dir < 0:
self.pl += ((self.aep * filled) - (data['exec-price'] * filled)) / (self.aep * self.e)
# compute the average price
self.axp = ((self.axp * self.x) + (data['exec-price'] * filled)) / (self.x + filled)
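# Illustrative example (hypothetical values): for a long trade with aep=100.0
# and e=2.0, an exit fill of 0.5 at exec-price=110.0 adds
# ((110.0*0.5) - (100.0*0.5)) / (100.0*2.0) = 0.025 (2.5%) to pl, and axp is
# updated as the quantity weighted average of the exit executions.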
# cumulative filled exit qty
if data.get('cumulative-filled') is not None:
self.x = data.get('cumulative-filled')
elif filled > 0:
self.x = instrument.adjust_quantity(self.x + filled)
logger.info("Exit avg-price=%s cum-filled=%s" % (self.axp, self.x))
if self.x >= self.oq:
self._exit_state = StrategyTrade.STATE_FILLED
# if no ORDER_DELETED signal is sent, clean up here
if data['id'] == self.limit_oid:
self.limit_oid = None
self.limit_ref_oid = None
elif data['id'] == self.stop_oid:
self.stop_oid = None
self.stop_ref_oid = None
else:
self._exit_state = StrategyTrade.STATE_PARTIALLY_FILLED
# retains the trade timestamp
if not self._stats['first-realized-exit-timestamp']:
self._stats['first-realized-exit-timestamp'] = data.get('timestamp', 0.0)
self._stats['last-realized-exit-timestamp'] = data.get('timestamp', 0.0)
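# Illustrative ORDER_TRADED payload for the handler above (field values are
# hypothetical, not from the source):
# {'id': '123', 'cumulative-filled': 0.75, 'avg-price': 101.2,
# 'timestamp': 1609459200.0}
# Such a partial fill keeps the trade in STATE_PARTIALLY_FILLED until
# cumulative-filled reaches the ordered quantity (self.oq).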
def position_signal(self, signal_type: int, data: dict, ref_order_id: str, instrument: Instrument):
if signal_type == Signal.SIGNAL_POSITION_OPENED:
self.position_id = data['id']
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_UPDATED:
# update the unrealized profit-loss in currency
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_DELETED:
# no longer related position
self.position_id = None
if data.get('profit-loss'):
self._stats['unrealized-profit-loss'] = data['profit-loss']
if data.get('profit-currency'):
self._stats['profit-loss-currency'] = data['profit-currency']
elif signal_type == Signal.SIGNAL_POSITION_AMENDED:
# might not occur
pass
def is_target_order(self, order_id: str, ref_order_id: str) -> bool:
if order_id and (order_id == self.create_oid or order_id == self.stop_oid or order_id == self.limit_oid):
return True
if ref_order_id and (ref_order_id == self.create_ref_oid or
ref_order_id == self.stop_ref_oid or
ref_order_id == self.limit_ref_oid):
return True
return False
def is_target_position(self, position_id: str, ref_order_id: str) -> bool:
if position_id and (position_id == self.position_id):
return True
if ref_order_id and (ref_order_id == self.create_ref_oid):
return True
return False
#
# persistence
#
def dumps(self) -> dict:
data = super().dumps()
data['create-ref-oid'] = self.create_ref_oid
data['stop-ref-oid'] = self.stop_ref_oid
data['limit-ref-oid'] = self.limit_ref_oid
data['create-oid'] = self.create_oid
data['stop-oid'] = self.stop_oid
data['limit-oid'] = self.limit_oid
data['position-id'] = self.position_id
data['stop-order-qty'] = self.stop_order_qty
data['limit-order-qty'] = self.limit_order_qty
return data
def loads(self, data: dict, strategy_trader: StrategyTrader,
context_builder: Optional[StrategyTraderContextBuilder] = None) -> bool:
if not super().loads(data, strategy_trader, context_builder):
return False
self.create_ref_oid = data.get('create-ref-oid')
self.stop_ref_oid = data.get('stop-ref-oid')
self.limit_ref_oid = data.get('limit-ref-oid')
self.create_oid = data.get('create-oid')
self.stop_oid = data.get('stop-oid')
self.limit_oid = data.get('limit-oid')
self.position_id = data.get('position-id')
self.stop_order_qty = data.get('stop-order-qty', 0.0)
self.limit_order_qty = data.get('limit-order-qty', 0.0)
return True
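# Minimal persistence sketch (assumed usage, the surrounding framework may
# differ): the dict returned by dumps() can be serialized, for example with
# json.dumps(trade.dumps()), and later restored on a fresh instance of the
# concrete trade class via trade.loads(json.loads(blob), strategy_trader) to
# recover the broker order ids and quantities after a restart.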
def check(self, trader: Trader, instrument: Instrument) -> int:
result = 1
#
# entry
#
if self.create_oid:
data = trader.order_info(self.create_oid, instrument)
if data is None:
# API error, do nothing, a retry is needed
result = -1
# entry order error status
# self._entry_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer entry order
self.create_oid = None
self.create_ref_oid = None
else:
if data['cumulative-filled'] > self.e or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
#
# exit
#
if self.stop_oid:
data = trader.order_info(self.stop_oid, instrument)
if data is None:
# API error, do nothing, a retry is needed
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer stop order
self.stop_oid = None
self.stop_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
if self.limit_oid:
data = trader.order_info(self.limit_oid, instrument)
if data is None:
# API error, do nothing, a retry is needed
result = -1
# exit order error status
# self._exit_state = StrategyTrade.STATE_ERROR
else:
if data['id'] is None:
# cannot retrieve the order, wrong id
result = 0
# no longer stop order
self.limit_oid = None
self.limit_ref_oid = None
else:
if data['cumulative-filled'] > self.x or data['fully-filled']:
self.order_signal(Signal.SIGNAL_ORDER_TRADED, data, data['ref-id'], instrument)
if data['status'] in ('closed', 'deleted'):
self.order_signal(Signal.SIGNAL_ORDER_DELETED, data['id'], data['ref-id'], instrument)
elif data['status'] in ('expired', 'canceled'):
self.order_signal(Signal.SIGNAL_ORDER_CANCELED, data['id'], data['ref-id'], instrument)
return result
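# Note on check() above: it returns 1 when all known order ids are still
# valid, 0 when a stale id was found and cleaned up, and -1 when the broker
# API could not be queried and the call should be retried.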
def repair(self, trader: Trader, instrument: Instrument) -> bool:
# @todo fix the trade
return False
#
# stats
#
def update_stats(self, instrument: Instrument, timestamp: float):
super().update_stats(instrument, timestamp)
if self.is_active():
# @todo supports only quantity expressed in the asset, not in lots or contracts of a different size
last_price = instrument.close_exec_price(self.direction)
upnl = 0.0 # unrealized PNL
rpnl = 0.0 # realized PNL
# non realized quantity
nrq = self.e - self.x
if self.dir > 0:
upnl = last_price * nrq - self.aep * nrq
rpnl = self.axp * self.x - self.aep * self.x
elif self.dir < 0:
upnl = self.aep * nrq - last_price * nrq
rpnl = self.aep * self.x - self.axp * self.x
# including fees and realized profit and loss
self._stats['unrealized-profit-loss'] = instrument.adjust_quote(
upnl + rpnl - self._stats['entry-fees'] - self._stats['exit-fees'])
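# Illustrative example (hypothetical values): a long trade with aep=100.0,
# e=1.0, x=0.0 and last_price=110.0 has nrq=1.0, upnl=10.0 and rpnl=0.0, so
# the unrealized profit-loss is 10.0 minus any entry/exit fees.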
def info_report(self, strategy_trader: StrategyTrader) -> Tuple[str, ...]:
data = list(super().info_report(strategy_trader))
if self.create_oid or self.create_ref_oid:
data.append("Entry order id / ref : %s / %s" % (self.create_oid, self.create_ref_oid))
if self.stop_oid or self.stop_ref_oid:
data.append("Stop order id / ref : %s / %s" % (self.stop_oid, self.stop_ref_oid))
if self.limit_oid or self.limit_ref_oid:
data.append("Limit order id / ref : %s / %s" % (self.limit_oid, self.limit_ref_oid))
if self.position_id:
data.append("Position id : %s" % (self.position_id,))
return tuple(data)
| 37.352321 | 119 | 0.553347 | 34,719 | 0.980486 | 0 | 0 | 127 | 0.003587 | 0 | 0 | 7,465 | 0.210816 |
4050379eb6d6d6226db82e9fbcbba3933c358e43 | 718 | py | Python | src/pyramid_debugtoolbar_api_sqlalchemy/__init__.py | jvanasco/pyramid_debugtoolbar_api_sqla | 286a7e4ee32e7dd64f31813c46d59e0651534cbd | [
"MIT"
]
| null | null | null | src/pyramid_debugtoolbar_api_sqlalchemy/__init__.py | jvanasco/pyramid_debugtoolbar_api_sqla | 286a7e4ee32e7dd64f31813c46d59e0651534cbd | [
"MIT"
]
| null | null | null | src/pyramid_debugtoolbar_api_sqlalchemy/__init__.py | jvanasco/pyramid_debugtoolbar_api_sqla | 286a7e4ee32e7dd64f31813c46d59e0651534cbd | [
"MIT"
]
| null | null | null | # local
from .panels import SqlalchemyCsvDebugPanel
__VERSION__ = "0.3.1"
# ==============================================================================
def includeme(config):
"""
Pyramid hook to install this debugtoolbar plugin.
Update your ENVIRONMENT.ini file
debugtoolbar.includes = pyramid_debugtoolbar_api_sqlalchemy
"""
config.add_debugtoolbar_panel(SqlalchemyCsvDebugPanel)
config.add_route(
"debugtoolbar.api_sqlalchemy.queries.csv",
"/api-sqlalchemy/sqlalchemy-{request_id}.csv",
)
config.scan("pyramid_debugtoolbar_api_sqlalchemy.views")
config.commit()
# ==============================================================================
| 24.758621 | 80 | 0.558496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 475 | 0.66156 |
4050460227ae968820c1eb94e5dff24549e4e557 | 1,165 | py | Python | ultron/utilities/zlib_engine.py | wangjiehui11235/ultron | ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | [
"Apache-2.0"
]
| 4 | 2019-06-06T09:38:49.000Z | 2022-01-29T00:02:11.000Z | ultron/utilities/zlib_engine.py | wangjiehui11235/ultron | ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | [
"Apache-2.0"
]
| 1 | 2022-02-11T03:43:10.000Z | 2022-02-11T03:43:10.000Z | ultron/utilities/zlib_engine.py | wangjiehui11235/ultron | ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | [
"Apache-2.0"
]
| 8 | 2019-06-02T13:11:00.000Z | 2021-11-11T01:06:22.000Z | # -*- coding: utf-8 -*-
import os
import os.path
import zipfile
def zip_compress(dir_name, zip_filename):
filelist = []
if os.path.isfile(dir_name):
filelist.append(dir_name)
else:
for root, dirs, files in os.walk(dir_name):
for name in files:
filelist.append(os.path.join(root, name))
# ZIP_DEFLATED is the documented constant for zlib-based compression
zf = zipfile.ZipFile(zip_filename, "w", zipfile.ZIP_DEFLATED)
for tar in filelist:
arcname = tar[len(dir_name):]
zf.write(tar, arcname)
zf.close()
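# Example usage sketch (illustrative, the paths are hypothetical):
# zip_compress('/tmp/some_dir', '/tmp/some_dir.zip')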
def unzip_compress(zip_filename, unzip_dir):
if not os.path.exists(unzip_dir):
os.mkdir(unzip_dir)
zfobj = zipfile.ZipFile(zip_filename)
for name in zfobj.namelist():
name = name.replace('\\','/')
if name.endswith('/'):
os.mkdir(os.path.join(unzip_dir, name))
else:
ext_filename = os.path.join(unzip_dir, name)
ext_dir = os.path.dirname(ext_filename)
if not os.path.exists(ext_dir):
# makedirs creates intermediate directories, which a plain mkdir cannot
os.makedirs(ext_dir)
outfile = open(ext_filename, 'wb')
outfile.write(zfobj.read(name))
outfile.close() | 32.361111 | 66 | 0.581974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.034335 |
4050b29c16a41f96705714cdbf17492431b85f0e | 1,985 | py | Python | scripts/instances2inventory.py | TipaZloy/coda-automation | 20c00a92f2e3088e205677c0db96b3ed5c82b238 | [
"Apache-2.0"
]
| null | null | null | scripts/instances2inventory.py | TipaZloy/coda-automation | 20c00a92f2e3088e205677c0db96b3ed5c82b238 | [
"Apache-2.0"
]
| null | null | null | scripts/instances2inventory.py | TipaZloy/coda-automation | 20c00a92f2e3088e205677c0db96b3ed5c82b238 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
import boto
import boto.ec2
import sys
from pprint import pprint
from collections import defaultdict
output = defaultdict(lambda: [])
comments = defaultdict(lambda: {})
skip_region_strings = ['us-gov', 'cn-', 'ca-']
#skip_region_strings = ['us-gov', 'cn-', 'ca-', 'eu-', 'ap-']
if len(sys.argv) > 1:
filter = sys.argv[1]
else:
filter = False
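# The script walks every usable EC2 region and emits inventory groups
# (Ansible INI style) derived from each instance's Name tag, which is
# expected to follow either net_group_num or net_region_group_num.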
regions = boto.ec2.regions()
for region in regions:
if any (skip_string in region.name for skip_string in skip_region_strings):
continue
print('# Querying region:', region)
ec2conn = boto.connect_ec2(region=region)
reservations = ec2conn.get_all_instances()
instances = [i for r in reservations for i in r.instances]
for i in instances:
if filter:
if 'Name' in i.tags:
if filter not in i.tags['Name']:
continue
if 'running' not in i.state:
continue
if 'Name' in i.tags:
if 'Packer' in i.tags['Name']: continue
if i.tags['Name'].count('_') == 2:
try:
(net, group, num) = i.tags['Name'].split('_')
myregion = region.name
except:
print('Error parsing ', i.tags['Name'])
continue
elif i.tags['Name'].count('_') == 3:
try:
(net, myregion, group, num) = i.tags['Name'].split('_')
except:
print('Error parsing ', i.tags['Name'])
continue
groupname = "%ss" % group
else:
print('NONAME', end='')
groupname = 'unknown'
i.tags['Name'] = 'NONE'
output[groupname].append(i.public_dns_name)
try:
comments[groupname][i.public_dns_name] = "# %s\t%s\t%s\t%s\t%s" % (i.tags['Name'], myregion, i.instance_type, i.ip_address, i.launch_time)
except:
comments[groupname][i.public_dns_name] = "# MISSING DATA"
for group in output:
print("[%s]" % group)
hostlist = output[group]
hostlist.sort()
for host in hostlist:
print("%s \t%s" % (host, comments[group][host]))
print("\n")
| 25.126582 | 144 | 0.602519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.171788 |
4050f12cd3fda3e62426b196e960faffe455d7f7 | 938 | py | Python | selfdrive/crash.py | darknight111/openpilot3 | a0c755fbe1889f26404a8225816f57e89fde7bc2 | [
"MIT"
]
| 19 | 2020-08-05T12:11:58.000Z | 2022-03-07T01:18:56.000Z | selfdrive/crash.py | darknight111/openpilot3 | a0c755fbe1889f26404a8225816f57e89fde7bc2 | [
"MIT"
]
| 18 | 2020-08-20T05:17:38.000Z | 2021-12-06T09:02:00.000Z | selfdrive/crash.py | darknight111/openpilot3 | a0c755fbe1889f26404a8225816f57e89fde7bc2 | [
"MIT"
]
| 25 | 2020-08-30T09:10:14.000Z | 2022-02-20T02:31:13.000Z | """Install exception handler for process crash."""
from selfdrive.swaglog import cloudlog
from selfdrive.version import version
import sentry_sdk
from sentry_sdk.integrations.threading import ThreadingIntegration
def capture_exception(*args, **kwargs) -> None:
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
try:
sentry_sdk.capture_exception(*args, **kwargs)
sentry_sdk.flush() # https://github.com/getsentry/sentry-python/issues/291
except Exception:
cloudlog.exception("sentry exception")
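# Typical usage sketch (illustrative): call init() once at process start,
# then call capture_exception() from inside an except block so exc_info is
# available.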
def bind_user(**kwargs) -> None:
sentry_sdk.set_user(kwargs)
def bind_extra(**kwargs) -> None:
for k, v in kwargs.items():
sentry_sdk.set_tag(k, v)
def init() -> None:
sentry_sdk.init("https://[email protected]/5861866",
default_integrations=False, integrations=[ThreadingIntegration(propagate_hub=True)],
release=version)
| 33.5 | 102 | 0.735608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.229211 |
4051ffa508f128d4ca3a6951f908adec0dd2fce3 | 1,235 | py | Python | 0000_examples/grasping_antipodal_planning.py | huzhengtao14z/wrs | d567787ca41818f1756c325b304215faf7f10f29 | [
"MIT"
]
| null | null | null | 0000_examples/grasping_antipodal_planning.py | huzhengtao14z/wrs | d567787ca41818f1756c325b304215faf7f10f29 | [
"MIT"
]
| null | null | null | 0000_examples/grasping_antipodal_planning.py | huzhengtao14z/wrs | d567787ca41818f1756c325b304215faf7f10f29 | [
"MIT"
]
| null | null | null | import math
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import grasping.planning.antipodal as gpa
import robot_sim.end_effectors.grippers.yumi_gripper.yumi_gripper as yg
base = wd.World(cam_pos=[1, 1, 1],w=960,
h=540, lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
# object
object_tube = cm.CollisionModel("objects/tubebig.stl")
object_tube.set_rgba([.9, .75, .35, 1])
object_tube.attach_to(base)
# hnd_s
gripper_s = yg.YumiGripper()
grasp_info_list = gpa.plan_grasps(gripper_s, object_tube,
angle_between_contact_normals=math.radians(177),
openning_direction='loc_x',
max_samples=15, min_dist_between_sampled_contact_points=.005,
contact_offset=.005)
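# each grasp_info in grasp_info_list is a tuple of
# (jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat),
# as unpacked in the loop below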
gpa.write_pickle_file('tubebig', grasp_info_list, './', 'yumi_tube_big.pickle')
for grasp_info in grasp_info_list:
jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
gripper_s.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
gripper_s.gen_meshmodel(rgba=(1,0,0,0.01)).attach_to(base)
base.run() | 44.107143 | 95 | 0.701215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.063158 |
40544e3050932f38de418744707458dee5d3337b | 60,103 | py | Python | keystone/assignment/core.py | pritha-srivastava/keystone | 69abe058328954becdea13cc245459f2ba2342fc | [
"Apache-2.0"
]
| null | null | null | keystone/assignment/core.py | pritha-srivastava/keystone | 69abe058328954becdea13cc245459f2ba2342fc | [
"Apache-2.0"
]
| null | null | null | keystone/assignment/core.py | pritha-srivastava/keystone | 69abe058328954becdea13cc245459f2ba2342fc | [
"Apache-2.0"
]
| null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Assignment service."""
import copy
import itertools
from oslo_log import log
from keystone.common import cache
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
# This is a general cache region for assignment administration (CRUD
# operations).
MEMOIZE = cache.get_memoization_decorator(group='role')
# This builds a discrete cache region dedicated to role assignments computed
# for a given user + project/domain pair. Any write operation to add or remove
# any role assignment should invalidate this entire cache region.
COMPUTED_ASSIGNMENTS_REGION = cache.create_region(name='computed assignments')
MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator(
group='role',
region=COMPUTED_ASSIGNMENTS_REGION)
@notifications.listener
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
See :class:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.assignment'
_provides_api = 'assignment_api'
_SYSTEM_SCOPE_TOKEN = 'system'
_USER_SYSTEM = 'UserSystem'
_GROUP_SYSTEM = 'GroupSystem'
_PROJECT = 'project'
_ROLE_REMOVED_FROM_USER = 'role_removed_from_user'
_INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens'
def __init__(self):
assignment_driver = CONF.assignment.driver
super(Manager, self).__init__(assignment_driver)
self.event_callbacks = {
notifications.ACTIONS.deleted: {
'domain': [self._delete_domain_assignments],
},
}
def _delete_domain_assignments(self, service, resource_type, operations,
payload):
domain_id = payload['resource_info']
self.driver.delete_domain_assignments(domain_id)
def _get_group_ids_for_user_id(self, user_id):
# TODO(morganfainberg): Implement a way to get only group_ids
# instead of the more expensive to_dict() call for each record.
return [x['id'] for
x in PROVIDERS.identity_api.list_groups_for_user(user_id)]
def list_user_ids_for_project(self, tenant_id):
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['user_id'] for x in assignment_list]))
def _send_app_cred_notification_for_role_removal(self, role_id):
"""Delete all application credential for a specific role.
:param role_id: role identifier
:type role_id: string
"""
assignments = self.list_role_assignments(role_id=role_id)
for assignment in assignments:
if 'user_id' in assignment and 'project_id' in assignment:
payload = {
'user_id': assignment['user_id'],
'project_id': assignment['project_id']
}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER, payload
)
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_project(self, user_id, tenant_id):
"""Get the roles associated with a user within given project.
This includes roles directly assigned to the user on the
project, as well as those by virtue of group membership or
inheritance.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(tenant_id)
assignment_list = self.list_role_assignments(
user_id=user_id, project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_trustor_and_project(self, trustor_id, project_id):
"""Get the roles associated with a trustor within given project.
This includes roles directly assigned to the trustor on the
project, as well as those by virtue of group membership or
inheritance, but it doesn't include the domain roles.
:returns: a list of role ids.
:raises keystone.exception.ProjectNotFound: If the project doesn't
exist.
"""
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
user_id=trustor_id, project_id=project_id, effective=True,
strip_domain_roles=False)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
@MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_domain(self, user_id, domain_id):
"""Get the roles associated with a user within given domain.
:returns: a list of role ids.
:raises keystone.exception.DomainNotFound: If the domain doesn't exist.
"""
PROVIDERS.resource_api.get_domain(domain_id)
assignment_list = self.list_role_assignments(
user_id=user_id, domain_id=domain_id, effective=True)
# Use set() to process the list to remove any duplicates
return list(set([x['role_id'] for x in assignment_list]))
def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
"""Get a list of roles for this group on domain and/or project."""
# if no group ids were passed, there are no roles. Without this check,
# all assignments for the project or domain will be fetched,
# which is not what we want.
if not group_ids:
return []
if project_id is not None:
PROVIDERS.resource_api.get_project(project_id)
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, project_id=project_id,
effective=True)
elif domain_id is not None:
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, domain_id=domain_id,
effective=True)
else:
raise AttributeError(_("Must specify either domain or project"))
role_ids = list(set([x['role_id'] for x in assignment_list]))
return PROVIDERS.role_api.list_roles_from_ids(role_ids)
@notifications.role_assignment('created')
def _add_role_to_user_and_project_adapter(self, role_id, user_id=None,
group_id=None, domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# create_grant so that the notifications.role_assignment decorator
# will work.
PROVIDERS.resource_api.get_project(project_id)
PROVIDERS.role_api.get_role(role_id)
self.driver.add_role_to_user_and_project(user_id, project_id, role_id)
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self._add_role_to_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_projects_for_user(self, user_id):
# FIXME(lbragstad): Without the use of caching, listing effective role
# assignments is slow, especially with large data set (lots of users
# with multiple role assignments). This should serve as a marker in
# case we have the opportunity to come back and optimize this code so
# that it can be performant without having a hard dependency on
# caching. Please see https://bugs.launchpad.net/keystone/+bug/1700852
# for more details.
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
@MEMOIZE_COMPUTED_ASSIGNMENTS
def list_domains_for_user(self, user_id):
assignment_list = self.list_role_assignments(
user_id=user_id, effective=True)
# Use set() to process the list to remove any duplicates
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_domains_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
domain_ids = list(set([x['domain_id'] for x in assignment_list
if x.get('domain_id')]))
return PROVIDERS.resource_api.list_domains_from_ids(domain_ids)
def list_projects_for_groups(self, group_ids):
assignment_list = self.list_role_assignments(
source_from_group_ids=group_ids, effective=True)
project_ids = list(set([x['project_id'] for x in assignment_list
if x.get('project_id')]))
return PROVIDERS.resource_api.list_projects_from_ids(project_ids)
@notifications.role_assignment('deleted')
def _remove_role_from_user_and_project_adapter(self, role_id, user_id=None,
group_id=None,
domain_id=None,
project_id=None,
inherited_to_projects=False,
context=None):
# The parameters for this method must match the parameters for
# delete_grant so that the notifications.role_assignment decorator
# will work.
self.driver.remove_role_from_user_and_project(user_id, project_id,
role_id)
payload = {'user_id': user_id, 'project_id': project_id}
notifications.Audit.internal(
notifications.REMOVE_APP_CREDS_FOR_USER,
payload
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self._remove_role_from_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def _invalidate_token_cache(self, role_id, group_id, user_id, project_id,
domain_id):
if group_id:
actor_type = 'group'
actor_id = group_id
elif user_id:
actor_type = 'user'
actor_id = user_id
if domain_id:
target_type = 'domain'
target_id = domain_id
elif project_id:
target_type = 'project'
target_id = project_id
reason = (
'Invalidating the token cache because role %(role_id)s was '
'removed from %(actor_type)s %(actor_id)s on %(target_type)s '
'%(target_id)s.' %
{'role_id': role_id, 'actor_type': actor_type,
'actor_id': actor_id, 'target_type': target_type,
'target_id': target_id}
)
notifications.invalidate_token_cache_notification(reason)
@notifications.role_assignment('created')
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
role = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
project = PROVIDERS.resource_api.get_project(project_id)
# For domain specific roles, the domain of the project
# and role must match
if role['domain_id'] and project['domain_id'] != role['domain_id']:
raise exception.DomainSpecificRoleMismatch(
role_id=role_id,
project_id=project_id)
self.driver.create_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
role_ref = PROVIDERS.role_api.get_role(role_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.check_grant_role_id(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return role_ref
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
grant_ids = self.list_grant_role_ids(
user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
@notifications.role_assignment('deleted')
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False,
initiator=None):
# check if role exist before any processing
PROVIDERS.role_api.get_role(role_id)
if group_id is None:
# check if role exists on the user before revoke
self.check_grant_role_id(
role_id, user_id=user_id, group_id=None, domain_id=domain_id,
project_id=project_id,
inherited_to_projects=inherited_to_projects
)
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
else:
try:
# check if role exists on the group before revoke
self.check_grant_role_id(
role_id, user_id=None, group_id=group_id,
domain_id=domain_id, project_id=project_id,
inherited_to_projects=inherited_to_projects
)
if CONF.token.revoke_by_id:
self._invalidate_token_cache(
role_id, group_id, user_id, project_id, domain_id
)
except exception.GroupNotFound:
LOG.debug('Group %s not found, no tokens to invalidate.',
group_id)
if domain_id:
PROVIDERS.resource_api.get_domain(domain_id)
if project_id:
PROVIDERS.resource_api.get_project(project_id)
self.driver.delete_grant(
role_id, user_id=user_id, group_id=group_id, domain_id=domain_id,
project_id=project_id, inherited_to_projects=inherited_to_projects
)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# The methods _expand_indirect_assignment, _list_direct_role_assignments
# and _list_effective_role_assignments below are only used by
# list_role_assignments, but they are not nested inside its scope since that
# would significantly increase its McCabe complexity; that metric should be
# kept meaningful so it can flag unnecessarily complex code, which is not the
# case here.
def _expand_indirect_assignment(self, ref, user_id=None, project_id=None,
subtree_ids=None, expand_groups=True):
"""Return a list of expanded role assignments.
This method is called for each discovered assignment that either needs
a group assignment expanded into individual user assignments, or needs
an inherited assignment to be applied to its children.
In all cases, if either user_id and/or project_id is specified, then we
filter the result on those values.
If project_id is specified and subtree_ids is None, then this
indicates that we are only interested in that one project. If
subtree_ids is not None, then this is an indicator that any
inherited assignments need to be expanded down the tree. The
actual subtree_ids don't need to be used as a filter here, since we
already ensured only those assignments that could affect them
were passed to this method.
If expand_groups is True then we expand groups out to a list of
assignments, one for each member of that group.
"""
def create_group_assignment(base_ref, user_id):
"""Create a group assignment from the provided ref."""
ref = copy.deepcopy(base_ref)
ref['user_id'] = user_id
indirect = ref.setdefault('indirect', {})
indirect['group_id'] = ref.pop('group_id')
return ref
def expand_group_assignment(ref, user_id):
"""Expand group role assignment.
For any group role assignment on a target, it is replaced by a list
of role assignments containing one for each user of that group on
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': project_id,
'role_id': role_id
}
Once expanded, it should be returned as a list of entities like the
one below, one for each user_id in the provided group_id.
::
{
'user_id': user_id,
'project_id': project_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in indirect
subdict.
"""
if user_id:
return [create_group_assignment(ref, user_id=user_id)]
# Note(prashkre): Try to get the users in the group; if the
# group wasn't found in the backend, users is set to an
# empty list.
try:
users = PROVIDERS.identity_api.list_users_in_group(
ref['group_id'])
except exception.GroupNotFound:
LOG.warning('Group %(group)s was not found but still has role '
'assignments.', {'group': ref['group_id']})
users = []
return [create_group_assignment(ref, user_id=m['id'])
for m in users]
def expand_inherited_assignment(ref, user_id, project_id, subtree_ids,
expand_groups):
"""Expand inherited role assignments.
If expand_groups is True and this is a group role assignment on a
target, replace it by a list of role assignments containing one for
each user of that group, on every project under that target. If
expand_groups is False, then return a group assignment on an
inherited target.
If this is a user role assignment on a specific target (i.e.
project_id is specified, but subtree_ids is None) then simply
format this as a single assignment (since we are effectively
filtering on project_id). If however, project_id is None or
subtree_ids is not None, then replace this one assignment with a
list of role assignments for that user on every project under
that target.
An example of accepted ref is::
{
'group_id': group_id,
'project_id': parent_id,
'role_id': role_id,
'inherited_to_projects': 'projects'
}
Once expanded, it should be returned as a list of entities like the
one below, one for each user_id in the provided group_id and
for each subproject_id in the project_id subtree.
::
{
'user_id': user_id,
'project_id': subproject_id,
'role_id': role_id,
'indirect' : {
'group_id': group_id,
'project_id': parent_id
}
}
Returned list will be formatted by the Controller, which will
deduce a role assignment came from group membership if it has both
'user_id' in the main body of the dict and 'group_id' in the
'indirect' subdict, as well as it is possible to deduce if it has
come from inheritance if it contains both a 'project_id' in the
main body of the dict and 'parent_id' in the 'indirect' subdict.
"""
def create_inherited_assignment(base_ref, project_id):
"""Create a project assignment from the provided ref.
base_ref can either be a project or domain inherited
assignment ref.
"""
ref = copy.deepcopy(base_ref)
indirect = ref.setdefault('indirect', {})
if ref.get('project_id'):
indirect['project_id'] = ref.pop('project_id')
else:
indirect['domain_id'] = ref.pop('domain_id')
ref['project_id'] = project_id
ref.pop('inherited_to_projects')
return ref
# Define expanded project list to which to apply this assignment
if project_id:
# Since ref is an inherited assignment and we are filtering by
# project(s), we are only going to apply the assignment to the
# relevant project(s)
project_ids = [project_id]
if subtree_ids:
project_ids += subtree_ids
# If this is a domain inherited assignment, then we know
# that all the project_ids will get this assignment. If
# it's a project inherited assignment, and the assignment
# point is an ancestor of project_id, then we know that
# again all the project_ids will get the assignment. If,
# however, the assignment point is within the subtree,
# then only a partial tree will get the assignment.
resource_api = PROVIDERS.resource_api
if ref.get('project_id'):
if ref['project_id'] in project_ids:
project_ids = (
[x['id'] for x in
resource_api.list_projects_in_subtree(
ref['project_id'])])
elif ref.get('domain_id'):
# A domain inherited assignment, so apply it to all projects
# in this domain
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_domain(
ref['domain_id'])])
else:
# It must be a project assignment, so apply it to its subtree
project_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
ref['project_id'])])
new_refs = []
if 'group_id' in ref:
if expand_groups:
# Expand role assignment to all group members on any
# inherited target of any of the projects
for ref in expand_group_assignment(ref, user_id):
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Just place the group assignment on any inherited target
# of any of the projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
# Expand role assignment for all projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
return new_refs
if ref.get('inherited_to_projects') == 'projects':
return expand_inherited_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
elif 'group_id' in ref and expand_groups:
return expand_group_assignment(ref, user_id)
return [ref]
def add_implied_roles(self, role_refs):
"""Expand out implied roles.
The role_refs passed in have had all inheritance and group assignments
expanded out. We now need to look at the role_id in each ref and see
if it is a prior role for some implied roles. If it is, then we need to
duplicate that ref, one for each implied role. We store the prior role
in the indirect dict that is part of such a duplicated ref, so that a
caller can determine where the assignment came from.
"""
def _make_implied_ref_copy(prior_ref, implied_role_id):
# Create a ref for an implied role from the ref of a prior role,
# setting the new role_id to be the implied role and the indirect
# role_id to be the prior role
implied_ref = copy.deepcopy(prior_ref)
implied_ref['role_id'] = implied_role_id
indirect = implied_ref.setdefault('indirect', {})
indirect['role_id'] = prior_ref['role_id']
return implied_ref
if not CONF.token.infer_roles:
return role_refs
try:
implied_roles_cache = {}
role_refs_to_check = list(role_refs)
ref_results = list(role_refs)
checked_role_refs = list()
while role_refs_to_check:
next_ref = role_refs_to_check.pop()
checked_role_refs.append(next_ref)
next_role_id = next_ref['role_id']
if next_role_id in implied_roles_cache:
implied_roles = implied_roles_cache[next_role_id]
else:
implied_roles = (
PROVIDERS.role_api.list_implied_roles(next_role_id))
implied_roles_cache[next_role_id] = implied_roles
for implied_role in implied_roles:
implied_ref = (
_make_implied_ref_copy(
next_ref, implied_role['implied_role_id']))
if implied_ref in checked_role_refs:
# Avoid traversing a cycle
continue
else:
ref_results.append(implied_ref)
role_refs_to_check.append(implied_ref)
except exception.NotImplemented:
LOG.error('Role driver does not support implied roles.')
return ref_results
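# Note on add_implied_roles() above, with illustrative role names: if role
# "admin" implies "member" and "member" implies "reader", a single direct
# assignment of "admin" expands into three refs; each implied ref records the
# prior role in its 'indirect' dict (indirect={'role_id': <prior role id>}),
# and the cycle check prevents infinite expansion if the implication graph
# loops back on itself.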
def _filter_by_role_id(self, role_id, ref_results):
# if we arrive here, we need to filer by role_id.
filter_results = []
for ref in ref_results:
if ref['role_id'] == role_id:
filter_results.append(ref)
return filter_results
def _strip_domain_roles(self, role_refs):
"""Post process assignment list for domain roles.
Domain roles are only designed to do the job of inferring other roles
and since that has been done before this method is called, we need to
remove any assignments that include a domain role.
"""
def _role_is_global(role_id):
ref = PROVIDERS.role_api.get_role(role_id)
return (ref['domain_id'] is None)
filter_results = []
for ref in role_refs:
if _role_is_global(ref['role_id']):
filter_results.append(ref)
return filter_results
def _list_effective_role_assignments(self, role_id, user_id, group_id,
domain_id, project_id, subtree_ids,
inherited, source_from_group_ids,
strip_domain_roles):
"""List role assignments in effective mode.
When using effective mode, besides the direct assignments, the indirect
ones that come from grouping or inheritance are retrieved and will then
be expanded.
The resulting list of assignments will be filtered by the provided
parameters. If subtree_ids is not None, then we also want to include
all subtree_ids in the filter as well. Since we are in effective mode,
group can never act as a filter (since group assignments are expanded
into user roles) and domain can only be a filter if we want non-inherited
assignments, since domains can't inherit assignments.
The goal of this method is to only ask the driver for those
assignments that could affect the result based on the parameter filters
specified, hence avoiding retrieving a huge list.
"""
def list_role_assignments_for_actor(
role_id, inherited, user_id=None, group_ids=None,
project_id=None, subtree_ids=None, domain_id=None):
"""List role assignments for actor on target.
List direct and indirect assignments for an actor, optionally
for a given target (i.e. projects or domain).
:param role_id: List for a specific role, can be None meaning all
roles
:param inherited: Indicates whether inherited assignments or only
direct assignments are required. If None, then
both are required.
:param user_id: If not None, list only assignments that affect this
user.
:param group_ids: A list of groups required. Only one of user_id
and group_ids can be specified
:param project_id: If specified, only include those assignments
that affect at least this project, with
additionally any projects specified in
subtree_ids
:param subtree_ids: The list of projects in the subtree. If
specified, also include those assignments that
affect these projects. These projects are
guaranteed to be in the same domain as the
project specified in project_id. subtree_ids
can only be specified if project_id has also
been specified.
:param domain_id: If specified, only include those assignments
that affect this domain - by definition this will
not include any inherited assignments
:returns: List of assignments matching the criteria. Any inherited
or group assignments that could affect the resulting
response are included.
"""
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
# List direct project role assignments
non_inherited_refs = []
if inherited is False or inherited is None:
# Get non inherited assignments
non_inherited_refs = self.driver.list_role_assignments(
role_id=role_id, domain_id=domain_id,
project_ids=project_ids_of_interest, user_id=user_id,
group_ids=group_ids, inherited_to_projects=False)
inherited_refs = []
if inherited is True or inherited is None:
# Get inherited assignments
if project_id:
# The project and any subtree are guaranteed to be owned by
# the same domain, so since we are filtering by these
# specific projects, then we can only get inherited
# assignments from their common domain or from any of
# their parents projects.
# List inherited assignments from the project's domain
proj_domain_id = PROVIDERS.resource_api.get_project(
project_id)['domain_id']
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, domain_id=proj_domain_id,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
# For inherited assignments from projects, since we know
# they are from the same tree the only places these can
# come from are from parents of the main project or
# inherited assignments on the project or subtree itself.
source_ids = [project['id'] for project in
PROVIDERS.resource_api.list_project_parents(
project_id)]
if subtree_ids:
source_ids += project_ids_of_interest
if source_ids:
inherited_refs += self.driver.list_role_assignments(
role_id=role_id, project_ids=source_ids,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
else:
# List inherited assignments without filtering by target
inherited_refs = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
return non_inherited_refs + inherited_refs
# If filtering by group or inherited domain assignment the list is
# guaranteed to be empty
if group_id or (domain_id and inherited):
return []
if user_id and source_from_group_ids:
# You can't do both - and since source_from_group_ids is only used
# internally, this must be a coding error by the caller.
msg = _('Cannot list assignments sourced from groups and filtered '
'by user ID.')
raise exception.UnexpectedError(msg)
# If filtering by domain, then only non-inherited assignments are
# relevant, since domains don't inherit assignments
inherited = False if domain_id else inherited
# List user or explicit group assignments.
# Due to the need to expand implied roles, this call will skip
# filtering by role_id and instead return the whole set of roles.
# Matching on the specified role is performed at the end.
direct_refs = list_role_assignments_for_actor(
role_id=None, user_id=user_id, group_ids=source_from_group_ids,
project_id=project_id, subtree_ids=subtree_ids,
domain_id=domain_id, inherited=inherited)
# And those from the user's groups, so long as we are not restricting
# to a set of source groups (in which case we already got those
# assignments in the direct listing above).
group_refs = []
if not source_from_group_ids and user_id:
group_ids = self._get_group_ids_for_user_id(user_id)
if group_ids:
group_refs = list_role_assignments_for_actor(
role_id=None, project_id=project_id,
subtree_ids=subtree_ids, group_ids=group_ids,
domain_id=domain_id, inherited=inherited)
# Expand grouping and inheritance on retrieved role assignments
refs = []
expand_groups = (source_from_group_ids is None)
for ref in (direct_refs + group_refs):
refs += self._expand_indirect_assignment(
ref, user_id, project_id, subtree_ids, expand_groups)
refs = self.add_implied_roles(refs)
if strip_domain_roles:
refs = self._strip_domain_roles(refs)
if role_id:
refs = self._filter_by_role_id(role_id, refs)
return refs
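# Summary of the effective listing above: direct user/group assignments are
# fetched first, group and inherited assignments are expanded, implied roles
# are added, domain-specific roles are optionally stripped, and only then is
# the optional role_id filter applied.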
def _list_direct_role_assignments(self, role_id, user_id, group_id, system,
domain_id, project_id, subtree_ids,
inherited):
"""List role assignments without applying expansion.
Returns a list of direct role assignments, where their attributes match
the provided filters. If subtree_ids is not None, then we also want to
include all subtree_ids in the filter as well.
"""
group_ids = [group_id] if group_id else None
project_ids_of_interest = None
if project_id:
if subtree_ids:
project_ids_of_interest = subtree_ids + [project_id]
else:
project_ids_of_interest = [project_id]
project_and_domain_assignments = []
if not system:
project_and_domain_assignments = self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
domain_id=domain_id, project_ids=project_ids_of_interest,
inherited_to_projects=inherited)
system_assignments = []
if system or (not project_id and not domain_id and not system):
if user_id:
assignments = self.list_system_grants_for_user(user_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'user_id': user_id,
'role_id': assignment['id']}
)
elif group_id:
assignments = self.list_system_grants_for_group(group_id)
for assignment in assignments:
system_assignments.append(
{'system': {'all': True},
'group_id': group_id,
'role_id': assignment['id']}
)
else:
assignments = self.list_all_system_grants()
for assignment in assignments:
a = {}
if assignment['type'] == self._GROUP_SYSTEM:
a['group_id'] = assignment['actor_id']
elif assignment['type'] == self._USER_SYSTEM:
a['user_id'] = assignment['actor_id']
a['role_id'] = assignment['role_id']
a['system'] = {'all': True}
system_assignments.append(a)
for i, assignment in enumerate(system_assignments):
if role_id and role_id != assignment['role_id']:
system_assignments.pop(i)
assignments = []
for assignment in itertools.chain(
project_and_domain_assignments, system_assignments):
assignments.append(assignment)
return assignments
def list_role_assignments(self, role_id=None, user_id=None, group_id=None,
system=None, domain_id=None, project_id=None,
include_subtree=False, inherited=None,
effective=None, include_names=False,
source_from_group_ids=None,
strip_domain_roles=True):
"""List role assignments, honoring effective mode and provided filters.
Returns a list of role assignments, where their attributes match the
provided filters (role_id, user_id, group_id, domain_id, project_id and
inherited). If include_subtree is True, then assignments on all
descendants of the project specified by project_id are also included.
The inherited filter defaults to None, meaning to get both
non-inherited and inherited role assignments.
If effective mode is specified, this means that rather than simply
return the assignments that match the filters, any group or
inheritance assignments will be expanded. Group assignments will
become assignments for all the users in that group, and inherited
assignments will be shown on the projects below the assignment point.
Think of effective mode as being the list of assignments that actually
affect a user, for example the roles that would be placed in a token.
If include_names is set to true the entities' names are returned
in addition to their id's.
source_from_group_ids is a list of group IDs and, if specified, then
only those assignments that are derived from membership of these groups
are considered, and any such assignments will not be expanded into
their user membership assignments. This is different to a group filter
of the resulting list, instead being a restriction on which assignments
should be considered before expansion of inheritance. This option is
only used internally (i.e. it is not exposed at the API level) and is
only supported in effective mode (since in regular mode there is no
difference between this and a group filter, other than it is a list of
groups).
In effective mode, any domain specific roles are usually stripped from
the returned assignments (since such roles are not placed in tokens).
This stripping can be disabled by specifying strip_domain_roles=False,
which is useful for internal calls like trusts which need to examine
the full set of roles.
"""
subtree_ids = None
if project_id and include_subtree:
subtree_ids = (
[x['id'] for x in
PROVIDERS.resource_api.list_projects_in_subtree(
project_id)])
if system != 'all':
system = None
if effective:
role_assignments = self._list_effective_role_assignments(
role_id, user_id, group_id, domain_id, project_id,
subtree_ids, inherited, source_from_group_ids,
strip_domain_roles)
else:
role_assignments = self._list_direct_role_assignments(
role_id, user_id, group_id, system, domain_id, project_id,
subtree_ids, inherited)
if include_names:
return self._get_names_from_role_assignments(role_assignments)
return role_assignments
def _get_names_from_role_assignments(self, role_assignments):
role_assign_list = []
for role_asgmt in role_assignments:
new_assign = copy.deepcopy(role_asgmt)
for key, value in role_asgmt.items():
if key == 'domain_id':
_domain = PROVIDERS.resource_api.get_domain(value)
new_assign['domain_name'] = _domain['name']
elif key == 'user_id':
try:
# Note(knikolla): Try to get the user, otherwise
# if the user wasn't found in the backend
# use empty values.
_user = PROVIDERS.identity_api.get_user(value)
except exception.UserNotFound:
msg = ('User %(user)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'user': value})
new_assign['user_name'] = ''
new_assign['user_domain_id'] = ''
new_assign['user_domain_name'] = ''
else:
new_assign['user_name'] = _user['name']
new_assign['user_domain_id'] = _user['domain_id']
new_assign['user_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_user['domain_id'])['name'])
elif key == 'group_id':
try:
# Note(knikolla): Try to get the group, otherwise
# if the group wasn't found in the backend
# use empty values.
_group = PROVIDERS.identity_api.get_group(value)
except exception.GroupNotFound:
msg = ('Group %(group)s not found in the'
' backend but still has role assignments.')
LOG.warning(msg, {'group': value})
new_assign['group_name'] = ''
new_assign['group_domain_id'] = ''
new_assign['group_domain_name'] = ''
else:
new_assign['group_name'] = _group['name']
new_assign['group_domain_id'] = _group['domain_id']
new_assign['group_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_group['domain_id'])['name'])
elif key == 'project_id':
_project = PROVIDERS.resource_api.get_project(value)
new_assign['project_name'] = _project['name']
new_assign['project_domain_id'] = _project['domain_id']
new_assign['project_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_project['domain_id'])['name'])
elif key == 'role_id':
_role = PROVIDERS.role_api.get_role(value)
new_assign['role_name'] = _role['name']
if _role['domain_id'] is not None:
new_assign['role_domain_id'] = _role['domain_id']
new_assign['role_domain_name'] = (
PROVIDERS.resource_api.get_domain(
_role['domain_id'])['name'])
role_assign_list.append(new_assign)
return role_assign_list
def delete_group_assignments(self, group_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the group_id to the system assignment backend like
# we do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_group_assignments(group_id)
system_assignments = self.list_system_grants_for_group(group_id)
for assignment in system_assignments:
self.delete_system_grant_for_group(group_id, assignment['id'])
def delete_user_assignments(self, user_id):
# FIXME(lbragstad): This should be refactored in the Rocky release so
# that we can pass the user_id to the system assignment backend like we
# do with the project and domain assignment backend. Holding off on
# this because it will require an interface change to the backend,
# making it harder to backport for Queens RC.
self.driver.delete_user_assignments(user_id)
system_assignments = self.list_system_grants_for_user(user_id)
for assignment in system_assignments:
self.delete_system_grant_for_user(user_id, assignment['id'])
def check_system_grant_for_user(self, user_id, role_id):
"""Check if a user has a specific role on the system.
:param user_id: the ID of the user in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, user_id, target_id, inherited
)
def list_system_grants_for_user(self, user_id):
"""Return a list of roles the user has on the system.
:param user_id: the ID of the user
:returns: a list of role assignments the user has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
grants = self.driver.list_system_grants(
user_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_user(self, user_id, role_id):
"""Grant a user a role on the system.
:param user_id: the ID of the user
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._USER_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, user_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_user(self, user_id, role_id):
"""Remove a system grant from a user.
:param user_id: the ID of the user
:param role_id: the ID of the role to remove from the user on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the user doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(role_id, user_id, target_id, inherited)
def check_system_grant_for_group(self, group_id, role_id):
"""Check if a group has a specific role on the system.
:param group_id: the ID of the group in the assignment
:param role_id: the ID of the system role in the assignment
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment matching the role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
return self.driver.check_system_grant(
role_id, group_id, target_id, inherited
)
def list_system_grants_for_group(self, group_id):
"""Return a list of roles the group has on the system.
:param group_id: the ID of the group
:returns: a list of role assignments the group has system-wide
"""
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
grants = self.driver.list_system_grants(
group_id, target_id, assignment_type
)
grant_ids = []
for grant in grants:
grant_ids.append(grant['role_id'])
return PROVIDERS.role_api.list_roles_from_ids(grant_ids)
def create_system_grant_for_group(self, group_id, role_id):
"""Grant a group a role on the system.
:param group_id: the ID of the group
:param role_id: the ID of the role to grant on the system
"""
role = PROVIDERS.role_api.get_role(role_id)
if role.get('domain_id'):
raise exception.ValidationError(
'Role %(role_id)s is a domain-specific role. Unable to use '
'a domain-specific role in a system assignment.' % {
'role_id': role_id
}
)
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = self._GROUP_SYSTEM
inherited = False
self.driver.create_system_grant(
role_id, group_id, target_id, assignment_type, inherited
)
def delete_system_grant_for_group(self, group_id, role_id):
"""Remove a system grant from a group.
:param group_id: the ID of the group
:param role_id: the ID of the role to remove from the group on the
system
:raises keystone.exception.RoleAssignmentNotFound: if the group doesn't
have a role assignment with role_id on the system
"""
target_id = self._SYSTEM_SCOPE_TOKEN
inherited = False
self.driver.delete_system_grant(
role_id, group_id, target_id, inherited
)
def list_all_system_grants(self):
"""Return a list of all system grants."""
actor_id = None
target_id = self._SYSTEM_SCOPE_TOKEN
assignment_type = None
return self.driver.list_system_grants(
actor_id, target_id, assignment_type
)
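    # Sketch of how the system-grant helpers above are typically driven
    # (identifiers are placeholders; this manager is exposed elsewhere in
    # keystone as PROVIDERS.assignment_api):
    #
    #   PROVIDERS.assignment_api.create_system_grant_for_user(user_id, role_id)
    #   PROVIDERS.assignment_api.check_system_grant_for_user(user_id, role_id)
    #   roles = PROVIDERS.assignment_api.list_system_grants_for_user(user_id)
    #   PROVIDERS.assignment_api.delete_system_grant_for_user(user_id, role_id)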
class RoleManager(manager.Manager):
"""Default pivot point for the Role backend."""
driver_namespace = 'keystone.role'
_provides_api = 'role_api'
_ROLE = 'role'
def __init__(self):
# If there is a specific driver specified for role, then use it.
# Otherwise retrieve the driver type from the assignment driver.
role_driver = CONF.role.driver
if role_driver is None:
# Explicitly load the assignment manager object
assignment_driver = CONF.assignment.driver
assignment_manager_obj = manager.load_driver(
Manager.driver_namespace,
assignment_driver)
role_driver = assignment_manager_obj.default_role_driver()
super(RoleManager, self).__init__(role_driver)
@MEMOIZE
def get_role(self, role_id):
return self.driver.get_role(role_id)
def get_unique_role_by_name(self, role_name, hints=None):
if not hints:
hints = driver_hints.Hints()
hints.add_filter("name", role_name, case_sensitive=True)
found_roles = PROVIDERS.role_api.list_roles(hints)
if not found_roles:
raise exception.RoleNotFound(
_("Role %s is not defined") % role_name
)
elif len(found_roles) == 1:
return {'id': found_roles[0]['id']}
else:
raise exception.AmbiguityError(resource='role',
name=role_name)
def create_role(self, role_id, role, initiator=None):
ret = self.driver.create_role(role_id, role)
notifications.Audit.created(self._ROLE, role_id, initiator)
if MEMOIZE.should_cache(ret):
self.get_role.set(ret, self, role_id)
return ret
@manager.response_truncated
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
def update_role(self, role_id, role, initiator=None):
original_role = self.driver.get_role(role_id)
if ('domain_id' in role and
role['domain_id'] != original_role['domain_id']):
raise exception.ValidationError(
message=_('Update of `domain_id` is not allowed.'))
ret = self.driver.update_role(role_id, role)
notifications.Audit.updated(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
return ret
def delete_role(self, role_id, initiator=None):
PROVIDERS.assignment_api.delete_role_assignments(role_id)
PROVIDERS.assignment_api._send_app_cred_notification_for_role_removal(
role_id
)
self.driver.delete_role(role_id)
notifications.Audit.deleted(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
reason = (
'Invalidating the token cache because role %(role_id)s has been '
'removed. Role assignments for users will be recalculated and '
'enforced accordingly the next time they authenticate or validate '
'a token' % {'role_id': role_id}
)
notifications.invalidate_token_cache_notification(reason)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(ayoung): Add notification
def create_implied_role(self, prior_role_id, implied_role_id):
implied_role = self.driver.get_role(implied_role_id)
prior_role = self.driver.get_role(prior_role_id)
if implied_role['name'] in CONF.assignment.prohibited_implied_role:
raise exception.InvalidImpliedRole(role_id=implied_role_id)
if prior_role['domain_id'] is None and implied_role['domain_id']:
msg = _('Global role cannot imply a domain-specific role')
raise exception.InvalidImpliedRole(msg,
role_id=implied_role_id)
response = self.driver.create_implied_role(
prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
return response
def delete_implied_role(self, prior_role_id, implied_role_id):
self.driver.delete_implied_role(prior_role_id, implied_role_id)
COMPUTED_ASSIGNMENTS_REGION.invalidate()
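    # Sketch of driving the implied-role helpers above (role ids are
    # placeholders; this manager is exposed as PROVIDERS.role_api):
    #
    #   PROVIDERS.role_api.create_implied_role(prior_role_id, implied_role_id)
    #   PROVIDERS.role_api.delete_implied_role(prior_role_id, implied_role_id)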
| 44.520741 | 79 | 0.606842 | 58,450 | 0.972497 | 0 | 0 | 54,447 | 0.905895 | 0 | 0 | 23,790 | 0.395821 |
4059ed80d6a8d54038d707dea3406a21f8501339 | 3,193 | py | Python | single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
]
| 112 | 2018-04-18T07:13:03.000Z | 2022-03-11T03:36:34.000Z | single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
]
| 16 | 2018-05-11T11:41:08.000Z | 2021-04-24T03:50:54.000Z | single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
]
| 45 | 2018-04-18T07:13:06.000Z | 2021-12-22T03:46:18.000Z | import numpy as np
import os
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import read_image
linemod_object_diameters = {
'ape': 0.103,
'benchvise': 0.286908,
'cam': 0.173,
'can': 0.202,
'cat': 0.155,
'driller': 0.262,
'duck': 0.109,
'eggbox': 0.176364,
'glue': 0.176,
'holepuncher': 0.162,
'iron': 0.303153,
'lamp': 0.285155,
'phone': 0.213}
class LinemodDataset(GetterDataset):
def __init__(self, base_dir, obj_name='ape', split='train',
return_msk=False):
super(LinemodDataset, self).__init__()
split_path = os.path.join(
base_dir, 'LINEMOD', obj_name, '{}.txt'.format(split))
self.base_dir = base_dir
with open(split_path, 'r') as f:
self.img_paths = f.readlines()
self.add_getter(('img', 'point', 'label'), self._get_example)
if return_msk:
self.add_getter('msk', self._get_msk)
def __len__(self):
return len(self.img_paths)
def _get_example(self, i):
img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
img = read_image(img_path)
anno_path = img_path.replace(
'images', 'labels').replace(
'JPEGImages', 'labels').replace(
'.jpg', '.txt').replace('.png', '.txt')
anno = np.zeros(50*21)
if os.path.getsize(anno_path):
_, H, W = img.shape
tmp = read_truths_args(anno_path, 8.0/W)
size = tmp.size
if size > 50*21:
anno = tmp[0:50*21]
elif size > 0:
anno[0:size] = tmp
anno = anno.reshape(-1, 21)
anno = anno[:truths_length(anno)]
point = anno[:, 1:19].reshape(-1, 9, 2).astype(np.float32)
point[:, :, 0] *= W
point[:, :, 1] *= H
label = anno[:, 0].astype(np.int32)
return img, point, label
def _get_msk(self, i):
img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
mskpath = img_path.replace('JPEGImages', 'mask').replace(
'/00', '/').replace('.jpg', '.png')
msk = read_image(mskpath, color=False)[0]
return msk > 0
def truths_length(truths):
for i in range(50):
if truths[i][1] == 0:
return i
def read_truths(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
# to avoid single truth problem
truths = truths.reshape(truths.size//21, 21)
return truths
else:
return np.array([])
def read_truths_args(lab_path, min_box_scale):
truths = read_truths(lab_path)
new_truths = []
for i in range(truths.shape[0]):
new_truths.append(
[truths[i][0], truths[i][1], truths[i][2],
truths[i][3], truths[i][4], truths[i][5],
truths[i][6], truths[i][7], truths[i][8],
truths[i][9], truths[i][10], truths[i][11],
truths[i][12], truths[i][13], truths[i][14],
truths[i][15], truths[i][16], truths[i][17],
truths[i][18]])
return np.array(new_truths)
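# --- Usage sketch (not part of the original module) ---------------------
# The class above expects <base_dir>/LINEMOD/<obj_name>/<split>.txt to list
# the image paths; the directory below is a placeholder.
if __name__ == '__main__':
    dataset = LinemodDataset('path/to/data_root', obj_name='ape', split='train')
    img, point, label = dataset[0]
    # img: (3, H, W) float image, point: (R, 9, 2) keypoints in pixels,
    # label: (R,) int32 class ids, where R is the number of annotated objects.
    print(len(dataset), img.shape, point.shape, label.shape)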
| 30.409524 | 75 | 0.551832 | 1,816 | 0.568744 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.086752 |
405b957bd7045b5d856865ed3de04736c0fcea38 | 10,857 | py | Python | DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
]
| null | null | null | DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
]
| null | null | null | DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
]
| null | null | null | import FWCore.ParameterSet.Config as cms
process = cms.Process("BeamMonitor")
#----------------------------
# Common part for PP and H.I Running
#-----------------------------
process.load("DQM.Integration.test.inputsource_cfi")
#--------------------------
# HLT Filter
process.load("HLTrigger.special.HLTTriggerTypeFilter_cfi")
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter.SelectedTriggerType = 1
#----------------------------
# DQM Live Environment
#-----------------------------
process.load("DQM.Integration.test.environment_cfi")
process.dqmEnv.subSystemFolder = 'BeamMonitor'
import DQMServices.Components.DQMEnvironment_cfi
process.dqmEnvPixelLess = DQMServices.Components.DQMEnvironment_cfi.dqmEnv.clone()
process.dqmEnvPixelLess.subSystemFolder = 'BeamMonitor_PixelLess'
#----------------------------
# BeamMonitor
#-----------------------------
process.load("DQM.BeamMonitor.BeamMonitor_cff")
process.load("DQM.BeamMonitor.BeamMonitorBx_cff")
process.load("DQM.BeamMonitor.BeamMonitor_PixelLess_cff")
process.load("DQM.BeamMonitor.BeamConditionsMonitor_cff")
#### SETUP TRACKING RECONSTRUCTION ####
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load("DQM.Integration.test.FrontierCondition_GT_cfi")
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
# Change Beam Monitor variables
if process.dqmSaver.producer.value() == "Playback":
process.dqmBeamMonitor.BeamFitter.WriteAscii = False
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmdev/BeamMonitorDQM/BeamFitResults.txt'
else:
process.dqmBeamMonitor.BeamFitter.WriteAscii = True
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults.txt'
#process.dqmBeamMonitor.BeamFitter.SaveFitResults = False
#process.dqmBeamMonitor.BeamFitter.OutputFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.root'
process.dqmBeamMonitorBx.BeamFitter.WriteAscii = True
process.dqmBeamMonitorBx.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults_Bx.txt'
## TKStatus
process.dqmTKStatus = cms.EDAnalyzer("TKStatus",
BeamFitter = cms.PSet(
DIPFileName = process.dqmBeamMonitor.BeamFitter.DIPFileName
)
)
process.dqmcommon = cms.Sequence(process.dqmEnv
*process.dqmSaver)
process.monitor = cms.Sequence(process.dqmBeamMonitor)
#--------------------------
# Proton-Proton Stuff
#--------------------------
if (process.runType.getRunType() == process.runType.pp_run or process.runType.getRunType() == process.runType.cosmic_run):
print "Running pp"
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('HLT_L1*',
'HLT_Jet*',
'HLT_*Cosmic*',
'HLT_HT*',
'HLT_MinBias_*',
'HLT_Physics*',
'HLT_ZeroBias_v*')
)
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("RecoTracker.IterativeTracking.iterativeTk_cff")
## Pixelless Tracking
process.load('RecoTracker/Configuration/RecoTrackerNotStandard_cff')
process.MeasurementTracker.pixelClusterProducer = cms.string("")
# Offline Beam Spot
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff")
## Offline PrimaryVertices
import RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi
process.offlinePrimaryVertices = RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi.offlinePrimaryVertices.clone()
process.dqmBeamMonitor.OnlineMode = True
process.dqmBeamMonitor.resetEveryNLumi = 5
process.dqmBeamMonitor.resetPVEveryNLumi = 5
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 25
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('generalTracks')
process.offlinePrimaryVertices.TrackLabel = cms.InputTag("generalTracks")
process.offlinePrimaryVertices.label=cms.string("")
process.offlinePrimaryVertices.minNdof=cms.double(0.0)
process.offlinePrimaryVertices.useBeamConstraint=cms.bool(False)
    # Trigger names for selecting PVs for DIP publication; NO wildcard needed here,
    # it will pick all triggers which have these strings in their name
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_ZeroBias_v",
"HLT_Jet300_v",
"HLT_QuadJet70_v")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
#fast general track reco
process.iterTracking =cms.Sequence(process.InitialStep
*process.LowPtTripletStep
*process.PixelPairStep
*process.DetachedTripletStep
*process.MixedTripletStep
*process.PixelLessStep
*process.TobTecStep
*process.generalTracks)
process.tracking_FirstStep = cms.Sequence(process.siPixelDigis
*process.siStripDigis
*process.trackerlocalreco
*process.offlineBeamSpot
*process.recopixelvertexing
*process.iterTracking)
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.dqmcommon
*process.tracking_FirstStep
*process.offlinePrimaryVertices
*process.monitor)
#--------------------------------------------------
# Heavy Ion Stuff
#--------------------------------------------------
if (process.runType.getRunType() == process.runType.hi_run):
print "Running HI"
process.castorDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.csctfDigis.producer = cms.InputTag("rawDataRepacker")
process.dttfDigis.DTTF_FED_Source = cms.InputTag("rawDataRepacker")
process.ecalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.ecalPreshowerDigis.sourceTag = cms.InputTag("rawDataRepacker")
process.gctDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.gtDigis.DaqGtInputTag = cms.InputTag("rawDataRepacker")
process.gtEvmDigis.EvmGtInputTag = cms.InputTag("rawDataRepacker")
process.hcalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.muonCSCDigis.InputObjects = cms.InputTag("rawDataRepacker")
process.muonDTDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.muonRPCDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
#----------------------------
# Event Source
#-----------------------------
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring(
'HLT_HI*'
)
)
process.dqmBeamMonitor.OnlineMode = True ## in MC the LS are not ordered??
process.dqmBeamMonitor.resetEveryNLumi = 10
process.dqmBeamMonitor.resetPVEveryNLumi = 10
process.dqmBeamMonitor.BeamFitter.MinimumTotalLayers = 3 ## using pixel triplets
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 20
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_HI")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
## Load Heavy Ion Sequence
process.load("Configuration.StandardSequences.ReconstructionHeavyIons_cff") ## HI sequences
# Select events based on the pixel cluster multiplicity
import HLTrigger.special.hltPixelActivityFilter_cfi
process.multFilter = HLTrigger.special.hltPixelActivityFilter_cfi.hltPixelActivityFilter.clone(
inputTag = cms.InputTag('siPixelClusters'),
minClusters = cms.uint32(150),
maxClusters = cms.uint32(50000)
)
process.filter_step = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
#*process.multFilter
)
process.HIRecoForDQM = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
*process.siPixelRecHits
*process.offlineBeamSpot
*process.hiPixelVertices
*process.hiPixel3PrimTracks
)
# use HI pixel tracking and vertexing
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitorBx.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitor.primaryVertex = cms.untracked.InputTag('hiSelectedVertex')
process.dqmBeamMonitor.PVFitter.VertexCollection = cms.untracked.InputTag('hiSelectedVertex')
# make pixel vertexing less sensitive to incorrect beamspot
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.originRadius = 0.2
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.fixedError = 0.5
process.hiSelectedProtoTracks.maxD0Significance = 100
process.hiPixelAdaptiveVertex.TkFilterParameters.maxD0Significance = 100
process.hiPixelAdaptiveVertex.vertexCollections.useBeamConstraint = False
#not working due to wrong tag of reco
process.hiPixelAdaptiveVertex.vertexCollections.maxDistanceToBeam = 1.0
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.filter_step
*process.HIRecoForDQM
*process.dqmcommon
*process.monitor)
| 42.410156 | 127 | 0.644377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,040 | 0.280004 |
405c6e44b37edbad093dd87de80a9e8b880c990d | 3,036 | py | Python | tests/routes/test_hackers.py | TorrentofShame/hackathon-2021-backend | a85989421df8ad900b01ad026dbe713312b0a54e | [
"MIT"
]
| null | null | null | tests/routes/test_hackers.py | TorrentofShame/hackathon-2021-backend | a85989421df8ad900b01ad026dbe713312b0a54e | [
"MIT"
]
| null | null | null | tests/routes/test_hackers.py | TorrentofShame/hackathon-2021-backend | a85989421df8ad900b01ad026dbe713312b0a54e | [
"MIT"
]
| null | null | null | # flake8: noqa
import json
from src.models.hacker import Hacker
from tests.base import BaseTestCase
from datetime import datetime
class TestHackersBlueprint(BaseTestCase):
"""Tests for the Hackers Endpoints"""
"""create_hacker"""
def test_create_hacker(self):
now = datetime.now()
res = self.client.post(
"/api/hackers/",
data={"hacker": json.dumps(
{
"email": "[email protected]",
"date": now.isoformat(),
}
)},
content_type="multipart/form-data",
)
self.assertEqual(res.status_code, 201)
self.assertEqual(Hacker.objects.count(), 1)
def test_create_hacker_invalid_json(self):
res = self.client.post(
"/api/hackers/", data={"hacker": json.dumps({})}, content_type="multipart/form-data"
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 400)
self.assertEqual(data["name"], "Bad Request")
self.assertEqual(Hacker.objects.count(), 0)
def test_create_hacker_duplicate_user(self):
now = datetime.now()
Hacker.createOne(
email="[email protected]"
)
res = self.client.post(
"/api/hackers/",
data={"hacker": json.dumps(
{
"email": "[email protected]",
"date": now.isoformat(),
}
)},
content_type="multipart/form-data",
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 409)
self.assertIn(
"Sorry, that email already exists.", data["description"]
)
self.assertEqual(Hacker.objects.count(), 1)
def test_create_hacker_invalid_datatypes(self):
res = self.client.post(
"/api/hackers/",
data=json.dumps(
{"email": "notanemail"}
),
content_type="application/json",
)
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 400)
self.assertEqual(data["name"], "Bad Request")
self.assertEqual(Hacker.objects.count(), 0)
"""get_all_hackers"""
def test_get_all_hackers(self):
Hacker.createOne(
email="[email protected]"
)
Hacker.createOne(
email="[email protected]",
)
res = self.client.get("/api/hackers/get_all_hackers/")
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 201)
self.assertEqual(data["hackers"][0]["email"], "[email protected]")
self.assertEqual(data["hackers"][1]["email"], "[email protected]")
def test_get_all_hackers_not_found(self):
res = self.client.get("/api/hackers/get_all_hackers/")
data = json.loads(res.data.decode())
self.assertEqual(res.status_code, 404)
self.assertEqual(data["name"], "Not Found")
| 28.641509 | 96 | 0.560606 | 2,903 | 0.956192 | 0 | 0 | 0 | 0 | 0 | 0 | 626 | 0.206192 |
405e5ce74a48720ac95f86fcad8f93d05cb3edfc | 13,330 | py | Python | open_cp/sources/chicago.py | sumau/PredictCode | e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8 | [
"Artistic-2.0"
]
| 18 | 2017-04-19T09:17:19.000Z | 2021-05-24T08:53:28.000Z | open_cp/sources/chicago.py | sumau/PredictCode | e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8 | [
"Artistic-2.0"
]
| 8 | 2017-06-11T17:46:35.000Z | 2021-06-07T10:49:10.000Z | open_cp/sources/chicago.py | sumau/PredictCode | e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8 | [
"Artistic-2.0"
]
| 10 | 2017-07-19T18:29:37.000Z | 2020-11-12T22:06:45.000Z | """
sources.chicago
===============
Reads a CSV file in the format (as of April 2017) of data available from:
- https://catalog.data.gov/dataset/crimes-one-year-prior-to-present-e171f
- https://catalog.data.gov/dataset/crimes-2001-to-present-398a4
The default data is loaded from a file "chicago.csv" which should be downloaded
from one of the above links. The format of the data, frustratingly, differs
between the snapshot of last year, and the total.
The data is partly anonymous in that the address within a block is obscured,
while the geocoding seems complicated (work in progress to understand)...
The crime type "HOMICIDE" is reported multiple times in the dataset.
"""
import csv as _csv
import os.path as _path
import datetime
import numpy as _np
from ..data import TimedPoints
_datadir = None
_default_filename = "chicago.csv"
_FEET_IN_METERS = 3937 / 1200
def set_data_directory(datadir):
"""Set the default location for search for the default input file."""
global _datadir
_datadir = datadir
def get_default_filename():
"""Returns the default filename, if available. Otherwise raises
AttributeError.
"""
global _datadir
if _datadir is None:
raise AttributeError("datadir not set; call `set_data_directory()`.")
return _path.join(_datadir, _default_filename)
def _date_from_csv(date_string):
return datetime.datetime.strptime(date_string, "%m/%d/%Y %I:%M:%S %p")
def date_from_iso(iso_string):
"""Convert a datetime string in ISO format into a :class:`datetime`
instance.
:param iso_string: Like "2017-10-23T05:12:39"
:return: A :class:`datetime` instance.
"""
return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
def _date_from_other(dt_str):
# Like 4/16/13 5:00
try:
date, time = dt_str.split()
month, day, year = date.split("/")
hour, minutes = time.split(":")
return datetime.datetime(year=int(year)+2000, month=int(month), day=int(day),
hour=int(hour), minute=int(minutes))
except Exception as ex:
raise Exception("Failed to parse {}, cause {}/{}".format(dt_str, type(ex), ex))
_FIELDS = {
"snapshot" : {
"_DESCRIPTION_FIELD" : ' PRIMARY DESCRIPTION',
"_X_FIELD" : 'X COORDINATE',
"_Y_FIELD" : 'Y COORDINATE',
"_TIME_FIELD" : 'DATE OF OCCURRENCE',
"_GEOJSON_LOOKUP" : {"case": 'CASE#',
"address": "BLOCK",
"location": ' LOCATION DESCRIPTION',
"crime": ' PRIMARY DESCRIPTION',
"type": ' SECONDARY DESCRIPTION',
"timestamp": 'DATE OF OCCURRENCE'},
"GEOJSON_COORDS" : ('LONGITUDE', 'LATITUDE'),
"DT_CONVERT" : _date_from_csv
},
"all" : {
"_DESCRIPTION_FIELD" : 'Primary Type',
"_X_FIELD" : 'X Coordinate',
"_Y_FIELD" : 'Y Coordinate',
"_TIME_FIELD" : 'Date',
"_GEOJSON_LOOKUP" : {"case": 'Case Number',
"address": "Block",
"location": 'Location Description',
"crime": 'Primary Type',
"type": 'Description',
"timestamp": 'Date'},
"GEOJSON_COORDS" : ('Longitude', 'Latitude'),
"DT_CONVERT" : _date_from_csv
},
"gen" : {
"_DESCRIPTION_FIELD" : 'CRIME',
"_X_FIELD" : 'X',
"_Y_FIELD" : 'Y',
"_TIME_FIELD" : 'TIMESTAMP',
"_GEOJSON_LOOKUP" : {"case": 'CASE',
"address": "BLOCK",
"location": 'LOCATION',
"crime": 'CRIME',
"type": 'SUB-TYPE',
"timestamp": 'TIMESTAMP'},
"GEOJSON_COORDS" : ('X', 'Y'),
"DT_CONVERT" : _date_from_csv
}
}
_FIELDS["all_other"] = dict(_FIELDS["all"])
_FIELDS["all_other"]["DT_CONVERT"] = _date_from_other
def _convert_header(header, dic):
lookup = dict()
for field in [dic["_DESCRIPTION_FIELD"], dic["_X_FIELD"], dic["_Y_FIELD"], dic["_TIME_FIELD"]]:
if not field in header:
raise Exception("No field '{}' found in header".format(field))
lookup[field] = header.index(field)
return lookup
def default_burglary_data():
"""Load the default data, if available, giving just "THEFT" data.
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
try:
return load(get_default_filename(), {"THEFT"})
except Exception:
return None
def _get_dic(type):
try:
return _FIELDS[type]
except KeyError:
raise ValueError("Don't understand type {}".format(type))
def _load_to_list(file, dic, primary_description_names):
reader = _csv.reader(file)
lookup = _convert_header(next(reader), dic)
dt_convert = dic["DT_CONVERT"]
data = []
for row in reader:
description = row[lookup[dic["_DESCRIPTION_FIELD"]]].strip()
if not description in primary_description_names:
continue
x = row[lookup[dic["_X_FIELD"]]].strip()
y = row[lookup[dic["_Y_FIELD"]]].strip()
t = row[lookup[dic["_TIME_FIELD"]]].strip()
if x != "" and y != "":
data.append((dt_convert(t), float(x), float(y)))
return data
def load(file, primary_description_names, to_meters=True, type="snapshot"):
"""Load data from a CSV file in the expected format.
:param file: Name of the CSV file load, or a file-like object.
:param primary_description_names: Set of names to search for in the
"primary description field". E.g. pass `{"THEFT"}` to return only the
"theft" crime type.
:param to_meters: Convert the coordinates to meters; True by default.
:param type: Either "snapshot" or "all" depending on whether the data
has headers conforming the the data "last year" or "2001 to present".
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as file:
data = _load_to_list(file, dic, primary_description_names)
else:
data = _load_to_list(file, dic, primary_description_names)
data.sort(key = lambda triple : triple[0])
xcoords = _np.empty(len(data))
ycoords = _np.empty(len(data))
for i, (_, x, y) in enumerate(data):
xcoords[i], ycoords[i] = x, y
times = [t for t, _, _ in data]
if to_meters:
xcoords /= _FEET_IN_METERS
ycoords /= _FEET_IN_METERS
return TimedPoints.from_coords(times, xcoords, ycoords)
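# A minimal usage sketch (the CSV filename is an assumption):
#
#   theft = load("chicago.csv", {"THEFT"}, to_meters=True, type="snapshot")
#
# `theft` is then a TimedPoints instance with coordinates converted to metres
# and events sorted by timestamp.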
def _convert_header_for_geojson(header, dic):
try:
column_lookup = {}
for key, col_head in dic["_GEOJSON_LOOKUP"].items():
column_lookup[key] = header.index(col_head)
coord_lookup = [header.index(chead) for chead in dic["GEOJSON_COORDS"]]
return column_lookup, coord_lookup
except KeyError as ex:
raise ValueError("Header not in expected format: {} caused by {}/{}".format(
header, type(ex), ex))
def _generate_GeoJSON_Features(file, dic):
dt_convert = dic["DT_CONVERT"]
reader = _csv.reader(file)
column_lookup, coord_lookup = _convert_header_for_geojson(next(reader), dic)
for row in reader:
properties = {key : row[i] for key, i in column_lookup.items()}
properties["timestamp"] = dt_convert(properties["timestamp"]).isoformat()
if row[coord_lookup[0]] == "":
geometry = None
else:
coordinates = [float(row[i]) for i in coord_lookup]
geometry = {"type":"Point", "coordinates":coordinates}
yield {"geometry": geometry, "properties": properties,
"type": "Feature"}
def generate_GeoJSON_Features(file, type="snapshot"):
"""Generate a sequence of GeoJSON "features" from the CSV file.
See :func:`load_to_GeoJSON`.
:param file: Either a filename, or a file object.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as f:
yield from _generate_GeoJSON_Features(f, dic)
else:
yield from _generate_GeoJSON_Features(file, dic)
def load_to_GeoJSON(filename, type="snapshot"):
"""Load the specified CSV file to a list of GeoJSON (see
http://geojson.org/) features. Events with no location data have `None`
as the geometry. Timestamps are converted to standard ISO string format.
The returned "properties" have these keys:
- "case" for the "CASE#" field
- "crime" for the "PRIMARY DESCRIPTION" field
- "type" for the "SECONDARY DESCRIPTION" field
- "location" for the "LOCATION DESCRIPTION" field
- "timestamp" for the "DATE OF OCCURRENCE" field
- "address" for the "BLOCK" field
:param filename: Filename of the CSV file to process
:param type: Either "snapshot" or "all" depending on whether the data
has headers conforming the the data "last year" or "2001 to present".
:return: List of Python dictionaries in GeoJSON format.
"""
return list(generate_GeoJSON_Features(filename, type))
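# One returned feature has the following shape (values are illustrative; the
# property names come from the docstring above, and the geometry is None when
# the CSV row carries no coordinates):
#
#   {"type": "Feature",
#    "geometry": {"type": "Point", "coordinates": [-87.62, 41.88]},
#    "properties": {"case": "HX100000", "crime": "THEFT",
#                   "type": "OVER $500", "location": "STREET",
#                   "timestamp": "2017-04-01T12:30:00",
#                   "address": "001XX N STATE ST"}}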
try:
import geopandas as gpd
import shapely.geometry as _geometry
except:
gpd = None
_geometry = None
def convert_null_geometry_to_empty(frame):
"""Utility method. Convert any geometry in the geoDataFrame which is
"null" (`None` or empty) to a Point type geometry which is empty. The
returned geoDateFrame is suitable for projecting and other geometrical
transformations.
"""
def null_to_point(x):
if x is None or x.is_empty:
return _geometry.Point()
return x
newgeo = frame.geometry.map(null_to_point)
return frame.set_geometry(newgeo)
def convert_null_geometry_to_none(frame):
"""Utility method. Convert any geometry in the geoDataFrame which is
"null" (`None` or empty) to `None`. The returned geoDateFrame is suitable
for saving.
"""
def null_to_none(x):
if x is None or x.is_empty:
return None
return x
newgeo = frame.geometry.map(null_to_none)
return frame.set_geometry(newgeo)
def load_to_geoDataFrame(filename, datetime_as_string=True,
type="snapshot", empty_geometry="none"):
"""Return the same data as :func:`load_to_GeoJSON` but as a geoPandas
data-frame.
:param filename: Filename of the CSV file to process
:param datetime_as_string: Write the timestamp as an ISO formatted string.
Defaults to True which is best for saving the dataframe as e.g. a shape
file. Set to False to get timestamps as python objects, which is best
for using (geo)pandas to analyse the data.
:param type: Either "snapshot" or "all" depending on whether the data
has headers conforming the the data "last year" or "2001 to present".
:param empty_geometry: Either "none" to return `None` as the geometry of
crimes which have no location data in the CSV file (this is correct if
you wish to save the data-frame); or "empty" to return an empty `Point`
type (which is correct, for example, if you wish to re-project the
data-frame). Yes, GeoPandas appears to be annoying like this.
"""
geo_data = load_to_GeoJSON(filename, type=type)
if not datetime_as_string:
for feature in geo_data:
            feature["properties"]["timestamp"] = date_from_iso(feature["properties"]["timestamp"])
frame = gpd.GeoDataFrame.from_features(geo_data)
if empty_geometry == "none":
pass
elif empty_geometry == "empty":
frame = convert_null_geometry_to_empty(frame)
else:
raise ValueError("Unknown `empty_geometry` parameter `{}`".format(empty_geometry))
frame.crs = {"init":"epsg:4326"}
return frame
_sides = None
def _load_sides():
global _sides
if _sides is not None:
return
global _datadir
geojson = _path.join(_datadir, "Chicago_Areas.geojson")
frame = gpd.read_file(geojson)
side_mapping = {
"Far North" : [1,2,3,4,9,10,11,12,13,14,76,77],
"Northwest" : [15,16,17,18,19,20],
"North" : [5,6,7,21,22],
"West" : list(range(23, 32)),
"Central" : [8,32,33],
"South" : list(range(34,44)) + [60, 69],
"Southwest" : [56,57,58,59] + list(range(61,69)),
"Far Southwest" : list(range(70,76)),
"Far Southeast" : list(range(44,56))
}
frame["side"] = frame.area_numbe.map(lambda x : next(key
for key, item in side_mapping.items() if int(x) in item) )
_sides = frame.drop(["area", "area_num_1", "comarea", "comarea_id",
"perimeter", "shape_area", "shape_len"], axis=1)
_sides.crs = {"init": "epsg:4326"}
_sides = _sides.to_crs({"init": "epsg:2790"})
def get_side(name):
"""Return a geometry (a polygon, typically) of the outline of the shape
of the given "side" of Chicago, projected to {"init":"epsg:2790"}, which
is Illinois in metres.
Needs the file "Chicago_Areas.geojson" to be in the "datadir". This can
be downloaded from:
https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6
:param name: One of "Far North", "Northwest", "North", "West", "Central",
"South", "Southwest", "Far Southwest", "Far Southeast"
"""
_load_sides()
return _sides[_sides.side == name].unary_union
| 37.130919 | 113 | 0.641485 | 0 | 0 | 1,107 | 0.083046 | 0 | 0 | 0 | 0 | 6,533 | 0.490098 |
405e96dac8375ff59b836544a212c81d70fbb3ff | 2,140 | py | Python | Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py | kimi0230/LeetcodeGolang | 2b276e49b67d7f66731ce6c629cd1390642af230 | [
"MIT"
]
| 4 | 2021-07-21T01:16:11.000Z | 2022-01-11T07:43:51.000Z | Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py | kimi0230/LeetcodeGolang | 2b276e49b67d7f66731ce6c629cd1390642af230 | [
"MIT"
]
| null | null | null | Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py | kimi0230/LeetcodeGolang | 2b276e49b67d7f66731ce6c629cd1390642af230 | [
"MIT"
]
| null | null | null | # https://github.com/Anfany/Codility-Lessons-By-Python3/blob/master/L11_Sieve%20of%20Eratosthenes/11.2%20CountSemiprimes.md
def solution(N, P, Q):
"""
    For each interval given by the elements of arrays P and Q, return the
    number of semiprimes that are not greater than N.
    Stated time complexity is O(N * log(log(N)) + M); the trial-division
    implementation below is closer to O(N * sqrt(N) + M).
    :param N: upper bound for the semiprimes
    :param P: array of interval start points
    :param Q: array of interval end points
    :return: number of semiprimes found for each query
    """
    # A semiprime has exactly 3 or 4 divisors (counting 1 and itself) and must
    # not be the cube of a prime, e.g. (1, 3, 9, 27) or (1, 5, 25, 125).
    # First build a list for the numbers up to N: each entry holds the value
    # itself if it is a semiprime, otherwise 0.
    semi_prime = []
for i in range(1, N + 1):
factor_count = 0
sign = 0
for j in range(1, int(i ** 0.5) + 1):
if i % j == 0:
factor_count += 1
f = i / j
if f != j:
if f == j ** 2:
sign = 1
semi_prime.append(0)
break
else:
factor_count += 1
if factor_count > 4:
sign = 1
semi_prime.append(0)
break
if sign != 1:
if factor_count >= 3:
semi_prime.append(i)
else:
semi_prime.append(0)
    index_dict = {}  # for each value: how many semiprimes occur up to and including it
    semi_dict = {}  # every semiprime is added here (used as a membership check)
count = 0
for index, value in enumerate(semi_prime):
if value != 0:
count += 1
index_dict[value] = count
semi_dict[value] = 0
else:
index_dict[index + 1] = count
# index_dict {1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 2, 7: 2, 8: 2, 9: 3, 10: 4, 11: 4, 12: 4, 13: 4, 14: 5, 15: 6, 16: 6, 17: 6, 18: 6, 19: 6, 20: 6, 21: 7, 22: 8, 23: 8, 24: 8, 25: 9, 26: 10}
#semi_dict {4: 0, 6: 0, 9: 0, 10: 0, 14: 0, 15: 0, 21: 0, 22: 0, 25: 0, 26: 0}
print("index_dict",index_dict)
print("semi_dict",semi_dict)
    result_list = []  # count how many semiprimes fall inside each queried interval
for i, j in zip(P, Q):
if i in semi_dict:
result_list.append(index_dict[j] - index_dict[i] + 1)
else:
result_list.append(index_dict[j] - index_dict[i])
return result_list
if __name__ == '__main__':
solution(26,[1, 4, 16],[26, 10, 20]) | 33.4375 | 191 | 0.482243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,045 | 0.425142 |
4060cef76afd120f8b88cf8abb7104b1c967dfca | 2,614 | py | Python | src/zope/formlib/errors.py | zopefoundation/zope.formlib | af2d587a6eb24e59e95a8b1feb7aafc5d3b87ba4 | [
"ZPL-2.1"
]
| 4 | 2018-05-09T04:16:25.000Z | 2021-03-05T17:27:21.000Z | src/zope/formlib/errors.py | zopefoundation/zope.formlib | af2d587a6eb24e59e95a8b1feb7aafc5d3b87ba4 | [
"ZPL-2.1"
]
| 25 | 2016-03-24T15:23:08.000Z | 2021-03-05T16:53:53.000Z | src/zope/formlib/errors.py | zopefoundation/zope.formlib | af2d587a6eb24e59e95a8b1feb7aafc5d3b87ba4 | [
"ZPL-2.1"
]
| 5 | 2015-02-11T13:32:06.000Z | 2018-05-09T04:16:26.000Z | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Error related things.
"""
try:
from html import escape
except ImportError: # pragma: NO COVER
from cgi import escape
from zope.component import adapter
from zope.interface import implementer
from zope.interface import Invalid
from zope.i18n import Message
from zope.i18n import translate
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.browser import BrowserPage
from zope.formlib.interfaces import IWidgetInputErrorView
from zope.formlib.interfaces import IInvalidCSRFTokenError
@implementer(IWidgetInputErrorView)
@adapter(Invalid, IBrowserRequest)
class InvalidErrorView(object):
"""Display a validation error as a snippet of text."""
def __init__(self, context, request):
self.context = context
self.request = request
def snippet(self):
"""Convert a widget input error to an html snippet
>>> from zope.interface.exceptions import Invalid
>>> error = Invalid("You made an error!")
>>> InvalidErrorView(error, None).snippet()
u'<span class="error">You made an error!</span>'
"""
msg = self.context.args[0]
if isinstance(msg, Message):
msg = translate(msg, context=self.request)
return u'<span class="error">%s</span>' % escape(msg)
@adapter(IInvalidCSRFTokenError, IBrowserRequest)
class InvalidCSRFTokenErrorView(BrowserPage):
def update(self):
self.request.response.setStatus(403)
self.request.response.setHeader(
'Expires', 'Jan, 1 Jan 1970 00:00:00 GMT')
self.request.response.setHeader(
'Cache-Control', 'no-store, no-cache, must-revalidate')
self.request.response.setHeader(
'Pragma', 'no-cache')
def render(self):
msg = self.context.args[0]
if isinstance(msg, Message):
msg = translate(msg, context=self.request)
return escape(msg)
def __call__(self):
self.update()
return self.render()
| 34.394737 | 78 | 0.653405 | 1,343 | 0.513772 | 0 | 0 | 1,464 | 0.560061 | 0 | 0 | 1,122 | 0.429227 |
4061946ebfbadada4a68b023604bd5475c508749 | 6,090 | py | Python | src/packagedcode/about.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
]
| null | null | null | src/packagedcode/about.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
]
| null | null | null | src/packagedcode/about.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
]
| null | null | null | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import io
import os
from pathlib import Path
import saneyaml
from packagedcode import models
from packageurl import PackageURL
# TODO: Override get_package_resource so it returns the Resource that the ABOUT file is describing
TRACE = os.environ.get('SCANCODE_DEBUG_PACKAGE', False)
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(
' '.join(isinstance(a, str) and a or repr(a) for a in args)
)
class AboutFileHandler(models.DatafileHandler):
datasource_id = 'about_file'
default_package_type = 'about'
path_patterns = ('*.ABOUT',)
description = 'AboutCode ABOUT file'
documentation_url = 'https://aboutcode-toolkit.readthedocs.io/en/latest/specification.html'
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
with io.open(location, encoding='utf-8') as loc:
package_data = saneyaml.load(loc.read())
# About files can contain any purl and also have a namespace
about_type = package_data.get('type')
about_ns = package_data.get('namespace')
purl_type = None
purl_ns = None
purl = package_data.get('purl')
        if purl:
            purl = PackageURL.from_string(purl)
            purl_type = purl.type
            purl_ns = purl.namespace
package_type = about_type or purl_type or cls.default_package_type
package_ns = about_ns or purl_ns
name = package_data.get('name')
version = package_data.get('version')
homepage_url = package_data.get('home_url') or package_data.get('homepage_url')
download_url = package_data.get('download_url')
copyright_statement = package_data.get('copyright')
license_expression = package_data.get('license_expression')
declared_license = license_expression
owner = package_data.get('owner')
if not isinstance(owner, str):
owner = repr(owner)
parties = [models.Party(type=models.party_person, name=owner, role='owner')]
# FIXME: also include notice_file and license_file(s) as file_references
file_references = []
about_resource = package_data.get('about_resource')
if about_resource:
file_references.append(models.FileReference(path=about_resource))
# FIXME: we should put the unprocessed attributes in extra data
yield models.PackageData(
datasource_id=cls.datasource_id,
type=package_type,
namespace=package_ns,
name=name,
version=version,
declared_license=declared_license,
license_expression=license_expression,
copyright=copyright_statement,
parties=parties,
homepage_url=homepage_url,
download_url=download_url,
file_references=file_references,
)
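    # A minimal ABOUT file that parse() accepts could look like the following
    # YAML (values are illustrative; only the fields read above are shown):
    #
    #   about_resource: appdirs-1.4.3.tar.gz
    #   name: appdirs
    #   version: 1.4.3
    #   license_expression: mit
    #   copyright: Copyright (c) the appdirs authors
    #   owner: Example Owner
    #   home_url: https://example.com/appdirs
    #   download_url: https://example.com/appdirs-1.4.3.tar.gz
    #   purl: pkg:pypi/[email protected]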
@classmethod
def assemble(cls, package_data, resource, codebase):
"""
Yield a Package. Note that ABOUT files do not carry dependencies.
"""
datafile_path = resource.path
# do we have enough to create a package?
if package_data.purl:
package = models.Package.from_package_data(
package_data=package_data,
datafile_path=datafile_path,
)
package_uid = package.package_uid
# NOTE: we do not attach files to the Package level. Instead we
# update `for_package` in the file
resource.for_packages.append(package_uid)
resource.save(codebase)
if not package.license_expression:
package.license_expression = cls.compute_normalized_license(package)
yield package
if resource.pid is not None and package_data.file_references:
parent_resource = resource.parent(codebase)
if parent_resource and package_data.file_references:
root_path = Path(parent_resource.path)
                # FIXME: we should be able to get the path relative to the
                # ABOUT file resource; for now a file ref extends from the
                # root of the filesystem
file_references_by_path = {
str(root_path / ref.path): ref
for ref in package.file_references
}
for res in parent_resource.walk(codebase):
ref = file_references_by_path.get(res.path)
if not ref:
continue
# path is found and processed: remove it, so we can
# check if we found all of them
del file_references_by_path[res.path]
res.for_packages.append(package_uid)
res.save(codebase)
yield res
# if we have left over file references, add these to extra data
if file_references_by_path:
missing = sorted(file_references_by_path.values(), key=lambda r: r.path)
package.extra_data['missing_file_references'] = missing
else:
package.extra_data['missing_file_references'] = package_data.file_references[:]
# we yield this as we do not want this further processed
yield resource
| 36.25 | 98 | 0.621182 | 5,100 | 0.837438 | 4,769 | 0.783087 | 4,803 | 0.78867 | 0 | 0 | 1,696 | 0.278489 |
4061e49b5b1d7dddbcbb3f8df2b62b73c065877a | 2,359 | py | Python | gazepattern/eyedetector/admin.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
]
| null | null | null | gazepattern/eyedetector/admin.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
]
| 12 | 2020-06-05T22:56:39.000Z | 2022-02-10T10:35:13.000Z | gazepattern/eyedetector/admin.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
]
| 1 | 2019-10-06T23:40:45.000Z | 2019-10-06T23:40:45.000Z | # -*- coding: utf-8 -*-
#django
from django.contrib import admin
from django.db import transaction
#python
import csv
from decimal import Decimal
#gazepattern
from .models import Experiment, ExperimentPoint, Image, ImageRectangle, ExperimentPointCSV, ExperimentFunction
@transaction.atomic
def procesar(modeladmin, request, queryset):
for query in queryset:
file = query.file
with open(file.path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
rows = [row for row in csv_reader if len(row)]
for row in rows:
experiment_id = int(row[0])
fixation_number = int(row[1])
x = Decimal(row[2])
y = Decimal(row[3])
experiment = Experiment.objects.get(pk=experiment_id)
experiment_point = ExperimentPoint()
experiment_point.experiment = experiment
experiment_point.fixation_number = fixation_number
experiment_point.x = x
experiment_point.y = y
experiment_point.save()
procesar.short_description = "Process CSV files to generate experiment points"
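# The uploaded CSV is expected to contain one row per fixation, with no header
# line, in the column order read above (values are illustrative):
#
#   1,1,512.35,384.10    -> experiment_id, fixation_number, x, y
#   1,2,430.00,221.75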
class ExperimentPointCSVAdmin(admin.ModelAdmin):
list_display = ['id', 'file']
ordering = ['id']
actions = [procesar, ]
class ExperimentPointAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'fixation_number', 'x', 'y']
ordering = ['id']
search_fields = ["experiment__id"]
class ImageAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
ordering = ['id']
class ExperimentAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'description']
ordering = ['id']
class ImageRectangleAdmin(admin.ModelAdmin):
list_display = ['id', 'image_id','name']
ordering = ['id']
search_fields = ['image__id']
class ExperimentFunctionAdmin(admin.ModelAdmin):
list_display = ['id', 'experiment_id', 'function']
ordering = ['id']
search_fields = ['experiment__id']
admin.site.register(ExperimentPointCSV, ExperimentPointCSVAdmin)
admin.site.register(ExperimentPoint, ExperimentPointAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Experiment, ExperimentAdmin)
admin.site.register(ImageRectangle, ImageRectangleAdmin)
admin.site.register(ExperimentFunction, ExperimentFunctionAdmin) | 31.878378 | 110 | 0.676982 | 820 | 0.347605 | 0 | 0 | 836 | 0.354387 | 0 | 0 | 299 | 0.126749 |
4061ef1026efc595fdfdf42014af88613e5012a6 | 2,634 | py | Python | orders/tests/test_views.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
]
| null | null | null | orders/tests/test_views.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
]
| null | null | null | orders/tests/test_views.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
]
| null | null | null | from django.test import TestCase, Client
from django.urls import reverse
from orders.models import Order, OrderItem
from datetime import datetime
from django.utils.timezone import get_current_timezone
import pytz
class TestViews(TestCase):
def setUp(self):
self.client = Client()
def test_home_GET(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'layout.html')
def test_piechart_GET(self):
response = self.client.get(reverse('piechart'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/piechart.html')
def test_cohort_GET(self):
response = self.client.get(reverse('cohort'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/cohort.html')
def test_barchart_GET(self):
response = self.client.get(reverse('barchart'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'pages/barchart.html')
def test_get_shipping_data_GET(self):
tz = get_current_timezone()
shipping_free = Order.objects.create(
order_id=1,
customer_id=10,
shipping=0,
created_at=tz.localize(datetime.now())
)
shipping_need = Order.objects.create(
order_id=2,
customer_id=14,
shipping=80,
created_at=tz.localize(datetime.now())
)
response = self.client.get(reverse('api-shipping-data'))
self.assertJSONEqual(response.content, {"labels": ["free shipping", "need shipping"], "counts": [1, 1]})
def test_get_top3_products_GET(self):
product1 = OrderItem.objects.create(
order_id=1,
product_name='product1',
qty=3
)
product2 = OrderItem.objects.create(
order_id=2,
product_name='product2',
qty=2
)
product2_1 = OrderItem.objects.create(
order_id=3,
product_name='product2',
qty=5
)
product3 = OrderItem.objects.create(
order_id=4,
product_name='product3',
qty=1
)
product4 = OrderItem.objects.create(
order_id=5,
product_name='product4',
qty=2
)
response = self.client.get(reverse('api-top3-products'))
self.assertJSONEqual(response.content, {"labels": ["product2", "product1", "product4"], "counts": [7, 3, 2]}) | 34.207792 | 118 | 0.612756 | 2,420 | 0.918755 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.109339 |
406203c920d38242adfa5e5ed2a39070a52fd1c1 | 373 | py | Python | codigo/hexagonal/app/adapter/light_bulb_repository.py | VulturARG/charla_01 | 43a53fded4f3205a02b00993a523e2f94b79fc99 | [
"Apache-2.0"
]
| null | null | null | codigo/hexagonal/app/adapter/light_bulb_repository.py | VulturARG/charla_01 | 43a53fded4f3205a02b00993a523e2f94b79fc99 | [
"Apache-2.0"
]
| null | null | null | codigo/hexagonal/app/adapter/light_bulb_repository.py | VulturARG/charla_01 | 43a53fded4f3205a02b00993a523e2f94b79fc99 | [
"Apache-2.0"
]
| null | null | null | from codigo.hexagonal.app.domain.switchable_repository import Switchable
class LightBulb(Switchable):
def turn_on(self) -> bool:
print("Connecting with the device...")
print("The light is on")
return True
def turn_off(self) -> bool:
print("The light is off")
print("Disconnecting with the device...")
return False
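# Usage sketch (illustrative; not part of the original adapter). Callers are
# expected to depend on the Switchable port and receive this concrete adapter:
if __name__ == "__main__":
    bulb = LightBulb()
    bulb.turn_on()
    bulb.turn_off()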
| 26.642857 | 72 | 0.646113 | 297 | 0.796247 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.268097 |
4062ba894ee618c56f6c5822e3859495a6c3298f | 541 | py | Python | aula12/ex1.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
]
| null | null | null | aula12/ex1.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
]
| null | null | null | aula12/ex1.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
]
| null | null | null | from random import randint
menor = 100
linha = 0
maior = 0
m = []
for i in range(10):
m.append([])
for j in range(10):
m[i].append(randint(1,99))
for i in range(10):
for j in range(10):
print(f'{m[i][j]:2}',end=' ')
print()
for i in range(10):
for j in range(10):
if m[i][j] > maior:
maior = m[i][j]
linha = i
for i in range(10):
if m[linha][i] < menor:
menor = m[linha][i]
print(f'the minimax is {menor}, with the largest value {maior} found in row {linha+1}.')
| 16.393939 | 76 | 0.51756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.160517 |
4063e065b5e1d8a9952507fe4d95419e55a2613a | 1,153 | py | Python | src/token_classification/format.py | adriens63/BERT_fine_tuning_for_MLM_and_token_classification | 89ff0d8ed12da370b1f8757ae9db8d725143a5bb | [
"Apache-2.0"
]
| null | null | null | src/token_classification/format.py | adriens63/BERT_fine_tuning_for_MLM_and_token_classification | 89ff0d8ed12da370b1f8757ae9db8d725143a5bb | [
"Apache-2.0"
]
| 1 | 2021-12-10T15:26:05.000Z | 2021-12-10T15:26:05.000Z | src/token_classification/format.py | adriens63/BERT_fine_tuning_for_MLM_and_token_classification | 89ff0d8ed12da370b1f8757ae9db8d725143a5bb | [
"Apache-2.0"
]
| 3 | 2021-12-05T12:43:23.000Z | 2021-12-10T15:42:40.000Z | import os.path as osp
import argparse
import yaml
from src.token_classification.archs.data_formatter import *
# ********************* launch formatting ***********************
# cmd to launch : python -m src.token_classification.format --config ./src/token_classification/config/config.yml
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'formatting for labeling')
parser.add_argument('--config', type=str, required=True, help='path to yaml config')
args = parser.parse_args()
with open(args.config, 'r') as f:
config = yaml.safe_load(f)
asigning_variables(config)
print('.... Start formatting')
path = osp.join(config['path'], config['offres'])
yaml_path = osp.join(config['path'], config['yaml'])
formatter = Formatter(path, yaml_path)
formatter.generate_name()
formatter.load()
formatter.sort_desc()
formatter.format_to_jsonl_in_proportions(n_desc = config['n_sequences'])
print('done;')
print()
print('/!\ Be careful to change the owner of the file before pasting it in doccano with the following command : sudo chown <user> <file>')
| 32.942857 | 142 | 0.674761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 446 | 0.386817 |
4063f5350f19ec0fcf289e841719b7191b72872c | 6,393 | py | Python | add.py | cleolepart/timedomain | 340e3fa614bca2dc333c9723893951318356dccf | [
"MIT"
]
| null | null | null | add.py | cleolepart/timedomain | 340e3fa614bca2dc333c9723893951318356dccf | [
"MIT"
]
| null | null | null | add.py | cleolepart/timedomain | 340e3fa614bca2dc333c9723893951318356dccf | [
"MIT"
]
| null | null | null | from __future__ import absolute_import, division, print_function
import os, sys, time
import numpy as np
import scipy.sparse
import scipy.linalg
import scipy.sparse.linalg
from astropy.table import Table, Column
import multiprocessing
from desiutil.log import get_logger
from desispec.interpolation import resample_flux
from desispec.spectra import Spectra
from desispec.resolution import Resolution
from desispec.fiberbitmasking import get_all_fiberbitmask_with_amp, get_all_nonamp_fiberbitmask_val, get_justamps_fiberbitmask
from desispec.specscore import compute_coadd_scores
from desispec.coaddition import coadd_fibermap
def add(spectra, cosmics_nsig=0.) :
"""
    Coadd the spectra for each target and each camera. The input spectra object is modified in place.
Args:
spectra: desispec.spectra.Spectra object
Options:
       cosmics_nsig: float, n-sigma clipping threshold for cosmic rays
"""
log = get_logger()
targets = np.unique(spectra.fibermap["TARGETID"])
ntarget=targets.size
log.debug("number of targets= {}".format(ntarget))
for b in spectra.bands :
log.debug("coadding band '{}'".format(b))
nwave=spectra.wave[b].size
tflux=np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
tivar=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
if spectra.mask is not None :
tmask=np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
else :
tmask=None
trdata=np.zeros((ntarget,spectra.resolution_data[b].shape[1],nwave),dtype=spectra.resolution_data[b].dtype)
fiberstatus_bits = get_all_fiberbitmask_with_amp(b)
good_fiberstatus = ( (spectra.fibermap["FIBERSTATUS"] & fiberstatus_bits) == 0 )
for i,tid in enumerate(targets) :
jj=np.where( (spectra.fibermap["TARGETID"]==tid) & good_fiberstatus )[0]
            #- if all spectra were flagged as bad (FIBERSTATUS != 0), continue
#- to next target, leaving tflux and tivar=0 for this target
if len(jj) == 0:
continue
if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
# interpolate over bad measurements
# to be able to compute gradient next
# to a bad pixel and identify outlier
# many cosmics residuals are on edge
# of cosmic ray trace, and so can be
# next to a masked flux bin
grad=[]
gradvar=[]
for j in jj :
if spectra.mask is not None :
ttivar = spectra.ivar[b][j]*(spectra.mask[b][j]==0)
else :
ttivar = spectra.ivar[b][j]
good = (ttivar>0)
bad = ~good
if np.sum(good)==0 :
continue
nbad = np.sum(bad)
ttflux = spectra.flux[b][j].copy()
if nbad>0 :
ttflux[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttflux[good])
ttivar = spectra.ivar[b][j].copy()
if nbad>0 :
ttivar[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttivar[good])
ttvar = 1./(ttivar+(ttivar==0))
ttflux[1:] = ttflux[1:]-ttflux[:-1]
ttvar[1:] = ttvar[1:]+ttvar[:-1]
ttflux[0] = 0
grad.append(ttflux)
gradvar.append(ttvar)
#tivar_unmasked= np.sum(spectra.ivar[b][jj],axis=0)
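            # straight (unweighted) sum of spectra: the variance of a sum is the
            # sum of the individual variances, hence ivar = 1 / sum(1/ivar)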
tivar_unmasked = 1 / np.sum(1/spectra.ivar[b][jj],axis=0)
if spectra.mask is not None :
ivarjj=spectra.ivar[b][jj]*(spectra.mask[b][jj]==0)
else :
ivarjj=spectra.ivar[b][jj]
if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
grad=np.array(grad)
gradvar=np.array(gradvar)
gradivar=(gradvar>0)/np.array(gradvar+(gradvar==0))
nspec=grad.shape[0]
sgradivar=np.sum(gradivar)
if sgradivar>0 :
meangrad=np.sum(gradivar*grad,axis=0)/sgradivar
deltagrad=grad-meangrad
chi2=np.sum(gradivar*deltagrad**2,axis=0)/(nspec-1)
bad = (chi2>cosmics_nsig**2)
nbad = np.sum(bad)
if nbad>0 :
log.info("masking {} values for targetid={}".format(nbad,tid))
badindex=np.where(bad)[0]
for bi in badindex :
k=np.argmax(gradivar[:,bi]*deltagrad[:,bi]**2)
ivarjj[k,bi]=0.
log.debug("masking spec {} wave={}".format(k,spectra.wave[b][bi]))
#tivar[i]=np.sum(ivarjj,axis=0)
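            # same convention for the target's coadd below: fluxes are added
            # directly and the variances are propagated (no ivar weighting)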
tivar[i]= 1 / np.sum(1/ivarjj,axis=0)
tflux[i]=np.sum(spectra.flux[b][jj],axis=0)
for r in range(spectra.resolution_data[b].shape[1]) :
trdata[i,r]=np.sum((spectra.resolution_data[b][jj,r]),axis=0) # not sure applying mask is wise here
bad=(tivar[i]==0)
if np.sum(bad)>0 :
tivar[i][bad] = 1 / np.sum(1/spectra.ivar[b][jj][:,bad],axis=0) # if all masked, keep original ivar
tflux[i][bad] = np.sum(spectra.flux[b][jj][:,bad],axis=0)
ok=(tivar[i]>0)
#if np.sum(ok)>0 :
#tflux[i][ok] /= tivar[i][ok]
ok=(tivar_unmasked>0)
if np.sum(ok)>0 :
trdata[i][:,ok] /= tivar_unmasked[ok]
if spectra.mask is not None :
tmask[i] = np.bitwise_or.reduce(spectra.mask[b][jj],axis=0)
spectra.flux[b] = tflux
spectra.ivar[b] = tivar
if spectra.mask is not None :
spectra.mask[b] = tmask
spectra.resolution_data[b] = trdata
if spectra.scores is not None:
orig_scores = Table(spectra.scores.copy())
orig_scores['TARGETID'] = spectra.fibermap['TARGETID']
else:
orig_scores = None
spectra.fibermap=coadd_fibermap(spectra.fibermap)
spectra.scores=None
compute_coadd_scores(spectra, orig_scores, update_coadd=True)
| 40.980769 | 126 | 0.552323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.14641 |
40642da36f0613fe957f14edea19df84f13b530a | 2,525 | py | Python | pontoon/pretranslation/tests/test_pretranslate.py | timvisee/pontoon | aec1ef7b5c5d56c3be28fecf1147945d2622bbad | [
"BSD-3-Clause"
]
| null | null | null | pontoon/pretranslation/tests/test_pretranslate.py | timvisee/pontoon | aec1ef7b5c5d56c3be28fecf1147945d2622bbad | [
"BSD-3-Clause"
]
| null | null | null | pontoon/pretranslation/tests/test_pretranslate.py | timvisee/pontoon | aec1ef7b5c5d56c3be28fecf1147945d2622bbad | [
"BSD-3-Clause"
]
| null | null | null | from mock import patch
import pytest
from pontoon.base.models import User
from pontoon.pretranslation.pretranslate import get_translations
from pontoon.test.factories import (
EntityFactory,
TranslationMemoryFactory,
)
@patch("pontoon.pretranslation.pretranslate.get_google_translate_data")
@pytest.mark.django_db
def test_get_translations(gt_mock, locale_b, resource_a, google_translate_locale):
entities = [
EntityFactory(resource=resource_a, string=x, order=i)
for i, x in enumerate(["abaa", "abac", "aaab", "abab"])
]
entities[1].string_plural = entities[1].string
entities[3].string_plural = entities[3].string
entities[1].save()
entities[3].save()
google_translate_locale.cldr_plurals = "1, 2"
google_translate_locale.save()
for entity in entities[0:2]:
TranslationMemoryFactory.create(
entity=entity, source=entity.string, target=entity.string, locale=locale_b,
)
TranslationMemoryFactory.create(
entity=entity,
source=entity.string,
target=entity.string,
locale=google_translate_locale,
)
# Mock the return value of get_google_translate_data
gt_mock.return_value = {
"status": True,
"translation": "gt_translation",
}
tm_user = User.objects.get(email="[email protected]")
gt_user = User.objects.get(email="[email protected]")
# 100% match exists in translation memory.
response_a = get_translations(entities[0], locale_b)
response_b = get_translations(entities[0], google_translate_locale)
assert response_a == [(entities[0].string, None, tm_user)]
assert response_b == [(entities[0].string, None, tm_user)]
    # 100% match does not exist and locale.google_translate_code is None.
response = get_translations(entities[2], locale_b)
assert response == []
    # 100% match does not exist and locale.google_translate_code is not None.
response = get_translations(entities[2], google_translate_locale)
assert response == [("gt_translation", None, gt_user)]
# Entity.string_plural is not None.
response_a = get_translations(entities[1], google_translate_locale)
response_b = get_translations(entities[3], google_translate_locale)
assert response_a == [
(entities[1].string, 0, tm_user),
(entities[1].string, 1, tm_user),
]
assert response_b == [
("gt_translation", 0, gt_user),
("gt_translation", 1, gt_user),
]
| 34.121622 | 87 | 0.693069 | 0 | 0 | 0 | 0 | 2,293 | 0.908119 | 0 | 0 | 499 | 0.197624 |
406526a2d40a76aa8b9a7ce0c6aadecb3ce65af4 | 9,615 | py | Python | cubes/common.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
]
| 1,020 | 2015-01-02T03:05:26.000Z | 2022-02-12T18:48:51.000Z | cubes/common.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
]
| 259 | 2015-01-02T22:35:14.000Z | 2021-09-02T04:20:41.000Z | cubes/common.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
]
| 288 | 2015-01-08T00:42:26.000Z | 2022-03-31T17:25:10.000Z | # -*- encoding: utf-8 -*-
"""Utility functions for computing combinations of dimensions and hierarchy
levels"""
from __future__ import absolute_import
import re
import os.path
import json
from collections import OrderedDict
from .errors import ModelInconsistencyError, ArgumentError, ConfigurationError
from . import compat
__all__ = [
"IgnoringDictionary",
"MissingPackage",
"localize_common",
"localize_attributes",
"get_localizable_attributes",
"decamelize",
"to_identifier",
"assert_instance",
"assert_all_instances",
"read_json_file",
"sorted_dependencies",
]
class IgnoringDictionary(OrderedDict):
"""Simple dictionary extension that will ignore any keys of which values
are empty (None/False)"""
def __setitem__(self, key, value):
if value is not None:
super(IgnoringDictionary, self).__setitem__(key, value)
def set(self, key, value):
"""Sets `value` for `key` even if value is null."""
super(IgnoringDictionary, self).__setitem__(key, value)
def __repr__(self):
items = []
for key, value in self.items():
item = '%s: %s' % (repr(key), repr(value))
items.append(item)
return "{%s}" % ", ".join(items)
def assert_instance(obj, class_, label):
"""Raises ArgumentError when `obj` is not instance of `cls`"""
if not isinstance(obj, class_):
raise ModelInconsistencyError("%s should be sublcass of %s, "
"provided: %s" % (label,
class_.__name__,
type(obj).__name__))
def assert_all_instances(list_, class_, label="object"):
"""Raises ArgumentError when objects in `list_` are not instances of
`cls`"""
for obj in list_ or []:
        assert_instance(obj, class_, label=label)
class MissingPackageError(Exception):
"""Exception raised when encountered a missing package."""
pass
class MissingPackage(object):
"""Bogus class to handle missing optional packages - packages that are not
necessarily required for Cubes, but are needed for certain features."""
def __init__(self, package, feature = None, source = None, comment = None):
self.package = package
self.feature = feature
self.source = source
self.comment = comment
def __call__(self, *args, **kwargs):
self._fail()
def __getattr__(self, name):
self._fail()
def _fail(self):
if self.feature:
use = " to be able to use: %s" % self.feature
else:
use = ""
if self.source:
source = " from %s" % self.source
else:
source = ""
if self.comment:
comment = ". %s" % self.comment
else:
comment = ""
raise MissingPackageError("Optional package '%s' is not installed. "
"Please install the package%s%s%s" %
(self.package, source, use, comment))
def optional_import(name, feature=None, source=None, comment=None):
"""Optionally import package `name`. If package does not exist, import a
placeholder object, that raises an exception with more detailed
description about the missing package."""
try:
return __import__(name)
except ImportError:
return MissingPackage(name, feature, source, comment)
def expand_dictionary(record, separator='.'):
"""Return expanded dictionary: treat keys are paths separated by
`separator`, create sub-dictionaries as necessary"""
result = {}
for key, value in record.items():
current = result
path = key.split(separator)
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = value
return result
def localize_common(obj, trans):
"""Localize common attributes: label and description"""
if "label" in trans:
obj.label = trans["label"]
if "description" in trans:
obj.description = trans["description"]
def localize_attributes(attribs, translations):
"""Localize list of attributes. `translations` should be a dictionary with
keys as attribute names, values are dictionaries with localizable
attribute metadata, such as ``label`` or ``description``."""
for (name, atrans) in translations.items():
attrib = attribs[name]
localize_common(attrib, atrans)
def get_localizable_attributes(obj):
"""Returns a dictionary with localizable attributes of `obj`."""
# FIXME: use some kind of class attribute to get list of localizable attributes
locale = {}
try:
if obj.label:
locale["label"] = obj.label
except:
pass
try:
if obj.description:
locale["description"] = obj.description
except:
pass
return locale
def decamelize(name):
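    """Convert a CamelCase `name` into space-separated words,
    e.g. "CamelCase" -> "Camel Case"."""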
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1)
def to_identifier(name):
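    """Lower-case `name` and replace spaces with underscores."""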
return re.sub(r' ', r'_', name).lower()
def to_label(name, capitalize=True):
"""Converts `name` into label by replacing underscores by spaces. If
`capitalize` is ``True`` (default) then the first letter of the label is
capitalized."""
label = name.replace("_", " ")
if capitalize:
label = label.capitalize()
return label
def coalesce_option_value(value, value_type, label=None):
"""Convert string into an object value of `value_type`. The type might be:
`string` (no conversion), `integer`, `float`, `list` – comma separated
list of strings.
"""
value_type = value_type.lower()
try:
if value_type in ('string', 'str'):
return_value = str(value)
elif value_type == 'list':
if isinstance(value, compat.string_type):
return_value = value.split(",")
else:
return_value = list(value)
elif value_type == "float":
return_value = float(value)
elif value_type in ["integer", "int"]:
return_value = int(value)
elif value_type in ["bool", "boolean"]:
if not value:
return_value = False
elif isinstance(value, compat.string_type):
return_value = value.lower() in ["1", "true", "yes", "on"]
else:
return_value = bool(value)
else:
raise ArgumentError("Unknown option value type %s" % value_type)
except ValueError:
if label:
label = "parameter %s " % label
else:
label = ""
raise ArgumentError("Unable to convert %svalue '%s' into type %s" %
                            (label, value, value_type))
return return_value
def coalesce_options(options, types):
"""Coalesce `options` dictionary according to types dictionary. Keys in
`types` refer to keys in `options`, values of `types` are value types:
string, list, float, integer or bool."""
out = {}
for key, value in options.items():
if key in types:
out[key] = coalesce_option_value(value, types[key], key)
else:
out[key] = value
return out
def read_json_file(path, kind=None):
"""Read a JSON from `path`. This is convenience function that provides
more descriptive exception handling."""
kind = "%s " % str(kind) if kind else ""
if not os.path.exists(path):
raise ConfigurationError("Can not find %sfile '%s'"
% (kind, path))
try:
f = compat.open_unicode(path)
except IOError:
raise ConfigurationError("Can not open %sfile '%s'"
% (kind, path))
try:
content = json.load(f)
except ValueError as e:
raise SyntaxError("Syntax error in %sfile %s: %s"
% (kind, path, str(e)))
finally:
f.close()
return content
def sorted_dependencies(graph):
"""Return keys from `deps` ordered by dependency (topological sort).
`deps` is a dictionary where keys are strings and values are list of
strings where keys is assumed to be dependant on values.
Example::
A ---> B -+--> C
|
+--> D --> E
Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}``
"""
graph = dict((key, set(value)) for key, value in graph.items())
# L ← Empty list that will contain the sorted elements
L = []
# S ← Set of all nodes with no dependencies (incoming edges)
S = set(parent for parent, req in graph.items() if not req)
while S:
# remove a node n from S
n = S.pop()
# insert n into L
L.append(n)
# for each node m with an edge e from n to m do
# (n that depends on m)
parents = [parent for parent, req in graph.items() if n in req]
for parent in parents:
graph[parent].remove(n)
# remove edge e from the graph
# if m has no other incoming edges then insert m into S
if not graph[parent]:
S.add(parent)
# if graph has edges then -> error
nonempty = [k for k, v in graph.items() if v]
if nonempty:
raise ArgumentError("Cyclic dependency of: %s"
% ", ".join(nonempty))
return L
| 30.141066 | 83 | 0.584191 | 1,843 | 0.19156 | 0 | 0 | 0 | 0 | 0 | 0 | 3,552 | 0.369192 |
40665e1c58be6db40c3e5c0613a58755896c8a6f | 4,366 | py | Python | wavenet_iaf.py | Ella77/ClariNet | 1a2eea899f5c28b34beb6fb08725f38309e7e053 | [
"MIT"
]
| 126 | 2019-05-23T03:37:43.000Z | 2021-08-02T20:15:22.000Z | wavenet_iaf.py | Ella77/ClariNet | 1a2eea899f5c28b34beb6fb08725f38309e7e053 | [
"MIT"
]
| 4 | 2019-06-05T11:30:51.000Z | 2022-03-17T09:01:29.000Z | wavenet_iaf.py | Ella77/ClariNet | 1a2eea899f5c28b34beb6fb08725f38309e7e053 | [
"MIT"
]
| 24 | 2019-05-23T03:37:39.000Z | 2021-12-23T22:29:01.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import Conv, ResBlock
class Wavenet_Student(nn.Module):
def __init__(self, num_blocks_student=[1, 1, 1, 1, 1, 1], num_layers=10,
front_channels=32, residual_channels=64, gate_channels=128, skip_channels=64,
kernel_size=3, cin_channels=80, causal=True):
super(Wavenet_Student, self).__init__()
self.num_blocks = num_blocks_student
self.num_flow = len(self.num_blocks)
self.num_layers = num_layers
self.iafs = nn.ModuleList()
for i in range(self.num_flow):
self.iafs.append(Wavenet_Flow(out_channels=2,
num_blocks=self.num_blocks[i], num_layers=self.num_layers,
front_channels=front_channels, residual_channels=residual_channels,
gate_channels=gate_channels, skip_channels=skip_channels,
kernel_size=kernel_size, cin_channels=cin_channels, causal=causal))
def forward(self, z, c):
return self.iaf(z, c)
def iaf(self, z, c_up):
mu_tot, logs_tot = 0., 0.
for i, iaf in enumerate(self.iafs):
mu_logs = iaf(z, c_up)
mu = mu_logs[:, 0:1, :-1]
logs = mu_logs[:, 1:, :-1]
mu_tot = mu_tot * torch.exp(logs) + mu
logs_tot = logs_tot + logs
z = z[:, :, 1:] * torch.exp(logs) + mu
z = F.pad(z, pad=(1, 0), mode='constant', value=0)
return z, mu_tot, logs_tot
def receptive_field(self):
receptive_field = 1
for iaf in self.iafs:
receptive_field += iaf.receptive_field_size() - 1
return receptive_field
def generate(self, z, c_up):
x, _, _ = self.iaf(z, c_up)
return x
def remove_weight_norm(self):
for iaf in self.iafs:
iaf.remove_weight_norm()
class Wavenet_Flow(nn.Module):
def __init__(self, out_channels=1, num_blocks=1, num_layers=10,
front_channels=32, residual_channels=64, gate_channels=32, skip_channels=None,
kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Flow, self).__init__()
self.causal = causal
self.num_blocks = num_blocks
self.num_layers = num_layers
self.front_channels = front_channels
self.out_channels = out_channels
self.gate_channels = gate_channels
self.residual_channels = residual_channels
self.skip_channels = skip_channels
self.cin_channels = cin_channels
self.kernel_size = kernel_size
self.front_conv = nn.Sequential(
Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
nn.ReLU()
)
self.res_blocks = nn.ModuleList()
self.res_blocks_fast = nn.ModuleList()
for b in range(self.num_blocks):
for n in range(self.num_layers):
self.res_blocks.append(ResBlock(self.residual_channels, self.gate_channels, self.skip_channels,
self.kernel_size, dilation=2**n,
cin_channels=self.cin_channels, local_conditioning=True,
causal=self.causal, mode='SAME'))
self.final_conv = nn.Sequential(
nn.ReLU(),
Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
nn.ReLU(),
Conv(self.skip_channels, self.out_channels, 1, causal=self.causal)
)
def forward(self, x, c):
return self.wavenet(x, c)
def wavenet(self, tensor, c=None):
h = self.front_conv(tensor)
skip = 0
for i, f in enumerate(self.res_blocks):
h, s = f(h, c)
skip += s
out = self.final_conv(skip)
return out
def receptive_field_size(self):
num_dir = 1 if self.causal else 2
dilations = [2 ** (i % self.num_layers) for i in range(self.num_layers * self.num_blocks)]
return num_dir * (self.kernel_size - 1) * sum(dilations) + 1 + (self.front_channels - 1)
def remove_weight_norm(self):
for f in self.res_blocks:
f.remove_weight_norm()
| 39.690909 | 111 | 0.584517 | 4,258 | 0.975263 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.003665 |
4066b6af1e7dfd680248198011c778dca827452b | 828 | py | Python | tests/__init__.py | ybelleguic/openbrokerapi | 9d6019dd1b6649c9d0cb0dee4b3236e0ee209dbc | [
"MIT"
]
| 36 | 2017-10-06T15:16:21.000Z | 2021-07-30T16:25:59.000Z | tests/__init__.py | ybelleguic/openbrokerapi | 9d6019dd1b6649c9d0cb0dee4b3236e0ee209dbc | [
"MIT"
]
| 167 | 2017-09-28T23:38:33.000Z | 2022-03-28T21:18:49.000Z | tests/__init__.py | ybelleguic/openbrokerapi | 9d6019dd1b6649c9d0cb0dee4b3236e0ee209dbc | [
"MIT"
]
| 26 | 2017-09-29T13:46:38.000Z | 2022-01-05T08:49:25.000Z | try:
from gevent import monkey
monkey.patch_all()
except ImportError:
# fine if no gevent is available
pass
import base64
import logging
from unittest.mock import Mock
from flask.app import Flask
from flask_testing import TestCase
from openbrokerapi.api import BrokerCredentials
from openbrokerapi.log_util import basic_config
class BrokerTestCase(TestCase):
auth_header = 'Basic ' + base64.b64encode(b":").decode("ascii")
def create_app(self):
from openbrokerapi.api import get_blueprint
app = Flask(__name__)
self.broker = Mock()
app.register_blueprint(
get_blueprint(self.broker,
BrokerCredentials("", ""),
basic_config(level=logging.WARN)
)
)
return app
| 23.657143 | 67 | 0.641304 | 479 | 0.578502 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.066425 |
4067311b4e6925a510e59163839cef51d453a910 | 5,234 | py | Python | ansible/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
]
| null | null | null | ansible/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
]
| null | null | null | ansible/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Michael Perzel
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_gtm_wide_ip
short_description: "Manages F5 BIG-IP GTM wide ip"
description:
- "Manages F5 BIG-IP GTM wide ip"
version_added: "2.0"
author:
- Michael Perzel (@perzizzle)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11.4"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Tested with manager and above account privilege level"
requirements:
- bigsuds
options:
lb_method:
description:
- LB method of wide ip
required: true
choices: ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
wide_ip:
description:
- Wide IP name
required: true
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Set lb method
local_action: >
bigip_gtm_wide_ip
server=192.0.2.1
user=admin
password=mysecret
lb_method=round_robin
wide_ip=my-wide-ip.example.com
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.f5 import bigip_api, f5_argument_spec
def get_wide_ip_lb_method(api, wide_ip):
lb_method = api.GlobalLB.WideIP.get_lb_method(wide_ips=[wide_ip])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
def get_wide_ip_pools(api, wide_ip):
try:
return api.GlobalLB.WideIP.get_wideip_pool([wide_ip])
except Exception:
e = get_exception()
print(e)
def wide_ip_exists(api, wide_ip):
# hack to determine if wide_ip exists
result = False
try:
api.GlobalLB.WideIP.get_object_status(wide_ips=[wide_ip])
result = True
except bigsuds.OperationFailed:
e = get_exception()
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def set_wide_ip_lb_method(api, wide_ip, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.GlobalLB.WideIP.set_lb_method(wide_ips=[wide_ip], lb_methods=[lb_method])
def main():
argument_spec = f5_argument_spec()
lb_method_choices = ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
meta_args = dict(
lb_method = dict(type='str', required=True, choices=lb_method_choices),
wide_ip = dict(type='str', required=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
wide_ip = module.params['wide_ip']
lb_method = module.params['lb_method']
validate_certs = module.params['validate_certs']
result = {'changed': False} # default
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
if not wide_ip_exists(api, wide_ip):
module.fail_json(msg="wide ip %s does not exist" % wide_ip)
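        # only push a change when the requested lb_method differs from what is
        # already configured; this keeps the module idempotent and honours
        # check mode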
if lb_method is not None and lb_method != get_wide_ip_lb_method(api, wide_ip):
if not module.check_mode:
set_wide_ip_lb_method(api, wide_ip, lb_method)
result = {'changed': True}
else:
result = {'changed': True}
except Exception:
e = get_exception()
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
if __name__ == '__main__':
main()
| 31.914634 | 97 | 0.643676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,564 | 0.489874 |
4067cec9a6ceb8438c7e66edc2d29eb2148964ae | 1,323 | py | Python | sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | b70d505f9c3fef4a5f857fdccaa60b1b64c8a71d | [
"Apache-2.0"
]
| 677 | 2016-01-04T04:05:50.000Z | 2022-03-24T06:37:27.000Z | sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | b70d505f9c3fef4a5f857fdccaa60b1b64c8a71d | [
"Apache-2.0"
]
| 249 | 2015-12-29T03:41:31.000Z | 2020-09-02T03:11:30.000Z | sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | b70d505f9c3fef4a5f857fdccaa60b1b64c8a71d | [
"Apache-2.0"
]
| 148 | 2015-12-29T03:25:48.000Z | 2021-08-25T03:59:52.000Z | #! /usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
def generate_csv(start_index, fname):
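    """Write `fname` as a CSV with NUM_COLS columns (named 'A<start_index>' and
    up) and NUM_ROWS rows of values drawn from range(NUM_DISTINCT_VALS); the
    NUM_* globals are assigned in the __main__ block before this is called."""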
cols = [
str('A' + str(i)) for i in range(start_index, NUM_COLS + start_index)
]
data = []
for i in range(NUM_ROWS):
vals = (np.random.choice(NUM_DISTINCT_VALS) for j in range(NUM_COLS))
data.append(vals)
df = pd.DataFrame(data=data, columns=cols)
df.to_csv(fname, index=False, header=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate sample tables to test joins.')
parser.add_argument('--num-rows', '-r', type=int, default=100)
parser.add_argument('--num-cols', '-c', type=int, required=True)
parser.add_argument('--num-distinct-vals', '-d', type=int, required=True)
parser.add_argument('--num-cols-overlap', '-o', type=int, default=1)
args = parser.parse_args()
NUM_ROWS = args.num_rows
NUM_COLS = args.num_cols
NUM_DISTINCT_VALS = args.num_distinct_vals
num_overlap = args.num_cols_overlap
if num_overlap > NUM_COLS:
print('--num-cols-overlap cannot be greater than --num-cols')
import sys
sys.exit(1)
generate_csv(0, 'table_a.csv')
generate_csv(NUM_COLS - num_overlap, 'table_b.csv')
| 30.068182 | 77 | 0.670446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.177627 |
4067fffb2bd9b7aaa8d3273ea742884e5f876e2d | 1,219 | py | Python | Advanced/1- Introduction/5- Index_words.py | AlirezaMojtabavi/Python_Practice | c0128d6ce4cf172d93cc4e48861e7980e8e016a2 | [
"MIT"
]
| null | null | null | Advanced/1- Introduction/5- Index_words.py | AlirezaMojtabavi/Python_Practice | c0128d6ce4cf172d93cc4e48861e7980e8e016a2 | [
"MIT"
]
| null | null | null | Advanced/1- Introduction/5- Index_words.py | AlirezaMojtabavi/Python_Practice | c0128d6ce4cf172d93cc4e48861e7980e8e016a2 | [
"MIT"
]
| 1 | 2020-11-14T07:19:26.000Z | 2020-11-14T07:19:26.000Z |
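# Exercise: read a phrase and print "position:word" pairs (1-based) for each
# capitalised word between the first and last word whose previous word does not
# end with '.', stripping trailing punctuation such as '.', ',' or "']".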
indexWords = list()
def PreviousWord(_list, _word):
if _list[_list.index(_word)-1] :
return _list[_list.index(_word)-1]
else:
return
phrase = str(input())
phraseList = phrase.split(" ")
length = len(phraseList)
# strip whitespace from every word (rebuild the list: reassigning the loop
# variable alone would leave phraseList unchanged)
phraseList = [item.strip() for item in phraseList]
if phrase != "" :
for i in range(1, length-1) :
lengthOfWord = len(phraseList[i])
if phraseList[i][0].isupper() :
if PreviousWord(phraseList, phraseList[i])[-1] != "." :
if phraseList[i][-1]=="." or phraseList[i][-1]=="," :
indexWords.append(i + 1)
indexWords.append(phraseList[i][: lengthOfWord-1])
elif phraseList[i][-1]== "]" and phraseList[i][-2]== "'" :
indexWords.append(i + 1)
indexWords.append(phraseList[i][: lengthOfWord-2])
else :
indexWords.append(i + 1)
indexWords.append(phraseList[i])
else:
print("None")
lengthOfIndexWord = len(indexWords)
if lengthOfIndexWord == 0 :
print("None")
else:
for i in range(0, lengthOfIndexWord//2):
print("%i:%s" %(indexWords[2*i],indexWords[(2*i)+1])) | 31.25641 | 74 | 0.538966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.031993 |
40686bfbfab402b52cf133e6f6f5366a147289d1 | 14,107 | py | Python | appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
]
| null | null | null | appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
]
| 4 | 2022-03-17T18:58:21.000Z | 2022-03-17T18:58:22.000Z | appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
]
| null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import mock
import webapp2
from google.appengine.api import taskqueue
from go.chromium.org.luci.buildbucket.proto.build_pb2 import Build
from testing_utils.testing import AppengineTestCase
from common.findit_http_client import FinditHttpClient
from common.waterfall import buildbucket_client
from handlers import completed_build_pubsub_ingestor
from model.isolated_target import IsolatedTarget
class CompletedBuildPubsubIngestorTest(AppengineTestCase):
app_module = webapp2.WSGIApplication([
('/index-isolated-builds',
completed_build_pubsub_ingestor.CompletedBuildPubsubIngestor),
],
debug=True)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testSucessfulPushCIBuild(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = 'Linux Builder'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}'
)['mock_target'] = 'mock_hash'
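    # the output property name encodes the gitiles ref and the commit position
    # ("{#123}"); the ingestor is expected to parse 123 into
    # IsolatedTarget.commit_position, as asserted below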
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Builder'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertEqual(
123,
IsolatedTarget.get_by_id(
'8945610992972640896/mock_target').commit_position)
self.assertEqual(
8945610992972640896,
IsolatedTarget.get_by_id('8945610992972640896/mock_target').build_id)
self.assertEqual(1, len(json.loads(response.body)['created_rows']))
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushNoBuild(self, mock_post, *_):
mock_headers = {'X-Prpc-Grpc-Code': '5'}
mock_post.return_value = (404, 'Build not found', mock_headers)
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'result': 'SUCCESS',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body, status=200)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushPendingBuild(self, mock_post, *_):
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'PENDING',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testSucessfulPushBadFormat(self, mock_post, *_):
request_body = json.dumps({
'message': {},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testNonIsolateBuild(self, mock_post, mock_get_build, *_):
# This build does not isolate any targets.
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = 'Linux Tester'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Tester'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertNotIn('created_rows', response.body)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testNoMasternameBuild(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.output.properties['buildername'] = 'Linux Builder'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}'
)['mock_target'] = 'mock_hash'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Builder'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertNotIn('created_rows', response.body)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testSucessfulPushTryJob(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'luci.chromium.findit'
mock_build.input.properties['target_builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = ('findit_variable')
mock_build.output.properties['target_buildername'] = (
'linux_chromium_compile_dbg_ng')
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}_with_patch'
)['mock_target'] = 'mock_hash'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}_without_patch'
)['mock_target'] = 'mock_hash_without'
mock_build.output.properties['repository'] = (
'https://test.googlesource.com/team/project.git')
mock_build.output.properties['gitiles_ref'] = 'refs/heads/mockmaster'
mock_change = mock_build.input.gerrit_changes.add()
mock_change.host = 'mock.gerrit.host'
mock_change.change = 12345
mock_change.patchset = 1
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'findit_variable'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertEqual(
123,
IsolatedTarget.get_by_id(
'8945610992972640896/mock_target').commit_position)
self.assertEqual(2, len(json.loads(response.body)['created_rows']))
# Ensure target values were used.
entry = IsolatedTarget.get_by_id('8945610992972640896/mock_target')
self.assertEqual('chromium.linux', entry.master_name)
self.assertEqual('linux_chromium_compile_dbg_ng', entry.builder_name)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushIgnoreV2Push(self, mock_post, *_):
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
'version': 'v2',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
| 40.654179 | 78 | 0.607712 | 13,518 | 0.958248 | 0 | 0 | 13,228 | 0.937691 | 0 | 0 | 3,741 | 0.265187 |
40686c4879d63aced85e26a35f076b9028592fdb | 24,660 | py | Python | sdk/python/pulumi_azure_native/containerservice/v20191027preview/open_shift_managed_cluster.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
]
| null | null | null | sdk/python/pulumi_azure_native/containerservice/v20191027preview/open_shift_managed_cluster.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
]
| null | null | null | sdk/python/pulumi_azure_native/containerservice/v20191027preview/open_shift_managed_cluster.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
]
| null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['OpenShiftManagedClusterArgs', 'OpenShiftManagedCluster']
@pulumi.input_type
class OpenShiftManagedClusterArgs:
def __init__(__self__, *,
open_shift_version: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]] = None,
auth_profile: Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']] = None,
monitor_profile: Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']] = None,
network_profile: Optional[pulumi.Input['NetworkProfileArgs']] = None,
plan: Optional[pulumi.Input['PurchasePlanArgs']] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a OpenShiftManagedCluster resource.
:param pulumi.Input[str] open_shift_version: Version of OpenShift specified when creating the cluster.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]] agent_pool_profiles: Configuration of OpenShift cluster VMs.
:param pulumi.Input['OpenShiftManagedClusterAuthProfileArgs'] auth_profile: Configures OpenShift authentication.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs'] master_pool_profile: Configuration for OpenShift master VMs.
:param pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs'] monitor_profile: Configures Log Analytics integration.
:param pulumi.Input['NetworkProfileArgs'] network_profile: Configuration for OpenShift networking.
:param pulumi.Input['PurchasePlanArgs'] plan: Define the resource plan as required by ARM for billing purposes
:param pulumi.Input[bool] refresh_cluster: Allows node rotation
:param pulumi.Input[str] resource_name: The name of the OpenShift managed cluster resource.
:param pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]] router_profiles: Configuration for OpenShift router(s).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "open_shift_version", open_shift_version)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if agent_pool_profiles is not None:
pulumi.set(__self__, "agent_pool_profiles", agent_pool_profiles)
if auth_profile is not None:
pulumi.set(__self__, "auth_profile", auth_profile)
if location is not None:
pulumi.set(__self__, "location", location)
if master_pool_profile is not None:
pulumi.set(__self__, "master_pool_profile", master_pool_profile)
if monitor_profile is not None:
pulumi.set(__self__, "monitor_profile", monitor_profile)
if network_profile is not None:
pulumi.set(__self__, "network_profile", network_profile)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if refresh_cluster is not None:
pulumi.set(__self__, "refresh_cluster", refresh_cluster)
if resource_name is not None:
pulumi.set(__self__, "resource_name", resource_name)
if router_profiles is not None:
pulumi.set(__self__, "router_profiles", router_profiles)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="openShiftVersion")
def open_shift_version(self) -> pulumi.Input[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "open_shift_version")
@open_shift_version.setter
def open_shift_version(self, value: pulumi.Input[str]):
pulumi.set(self, "open_shift_version", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="agentPoolProfiles")
def agent_pool_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]]:
"""
Configuration of OpenShift cluster VMs.
"""
return pulumi.get(self, "agent_pool_profiles")
@agent_pool_profiles.setter
def agent_pool_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterAgentPoolProfileArgs']]]]):
pulumi.set(self, "agent_pool_profiles", value)
@property
@pulumi.getter(name="authProfile")
def auth_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']]:
"""
Configures OpenShift authentication.
"""
return pulumi.get(self, "auth_profile")
@auth_profile.setter
def auth_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterAuthProfileArgs']]):
pulumi.set(self, "auth_profile", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="masterPoolProfile")
def master_pool_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']]:
"""
Configuration for OpenShift master VMs.
"""
return pulumi.get(self, "master_pool_profile")
@master_pool_profile.setter
def master_pool_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterMasterPoolProfileArgs']]):
pulumi.set(self, "master_pool_profile", value)
@property
@pulumi.getter(name="monitorProfile")
def monitor_profile(self) -> Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']]:
"""
Configures Log Analytics integration.
"""
return pulumi.get(self, "monitor_profile")
@monitor_profile.setter
def monitor_profile(self, value: Optional[pulumi.Input['OpenShiftManagedClusterMonitorProfileArgs']]):
pulumi.set(self, "monitor_profile", value)
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:
"""
Configuration for OpenShift networking.
"""
return pulumi.get(self, "network_profile")
@network_profile.setter
def network_profile(self, value: Optional[pulumi.Input['NetworkProfileArgs']]):
pulumi.set(self, "network_profile", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['PurchasePlanArgs']]:
"""
Define the resource plan as required by ARM for billing purposes
"""
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['PurchasePlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter(name="refreshCluster")
def refresh_cluster(self) -> Optional[pulumi.Input[bool]]:
"""
Allows node rotation
"""
return pulumi.get(self, "refresh_cluster")
@refresh_cluster.setter
def refresh_cluster(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "refresh_cluster", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the OpenShift managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="routerProfiles")
def router_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]]:
"""
Configuration for OpenShift router(s).
"""
return pulumi.get(self, "router_profiles")
@router_profiles.setter
def router_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftRouterProfileArgs']]]]):
pulumi.set(self, "router_profiles", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class OpenShiftManagedCluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]]] = None,
auth_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']]] = None,
monitor_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']]] = None,
network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None,
open_shift_version: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['PurchasePlanArgs']]] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
OpenShift Managed cluster.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]] agent_pool_profiles: Configuration of OpenShift cluster VMs.
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']] auth_profile: Configures OpenShift authentication.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']] master_pool_profile: Configuration for OpenShift master VMs.
:param pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']] monitor_profile: Configures Log Analytics integration.
:param pulumi.Input[pulumi.InputType['NetworkProfileArgs']] network_profile: Configuration for OpenShift networking.
:param pulumi.Input[str] open_shift_version: Version of OpenShift specified when creating the cluster.
:param pulumi.Input[pulumi.InputType['PurchasePlanArgs']] plan: Define the resource plan as required by ARM for billing purposes
:param pulumi.Input[bool] refresh_cluster: Allows node rotation
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the OpenShift managed cluster resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]] router_profiles: Configuration for OpenShift router(s).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: OpenShiftManagedClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
OpenShift Managed cluster.
:param str resource_name: The name of the resource.
:param OpenShiftManagedClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OpenShiftManagedClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]]] = None,
auth_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']]] = None,
monitor_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']]] = None,
network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None,
open_shift_version: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['PurchasePlanArgs']]] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OpenShiftManagedClusterArgs.__new__(OpenShiftManagedClusterArgs)
__props__.__dict__["agent_pool_profiles"] = agent_pool_profiles
__props__.__dict__["auth_profile"] = auth_profile
__props__.__dict__["location"] = location
__props__.__dict__["master_pool_profile"] = master_pool_profile
__props__.__dict__["monitor_profile"] = monitor_profile
__props__.__dict__["network_profile"] = network_profile
if open_shift_version is None and not opts.urn:
raise TypeError("Missing required property 'open_shift_version'")
__props__.__dict__["open_shift_version"] = open_shift_version
__props__.__dict__["plan"] = plan
__props__.__dict__["refresh_cluster"] = refresh_cluster
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["router_profiles"] = router_profiles
__props__.__dict__["tags"] = tags
__props__.__dict__["cluster_version"] = None
__props__.__dict__["fqdn"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_hostname"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20191027preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190930preview:OpenShiftManagedCluster")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(OpenShiftManagedCluster, __self__).__init__(
'azure-native:containerservice/v20191027preview:OpenShiftManagedCluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'OpenShiftManagedCluster':
"""
Get an existing OpenShiftManagedCluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = OpenShiftManagedClusterArgs.__new__(OpenShiftManagedClusterArgs)
__props__.__dict__["agent_pool_profiles"] = None
__props__.__dict__["auth_profile"] = None
__props__.__dict__["cluster_version"] = None
__props__.__dict__["fqdn"] = None
__props__.__dict__["location"] = None
__props__.__dict__["master_pool_profile"] = None
__props__.__dict__["monitor_profile"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_profile"] = None
__props__.__dict__["open_shift_version"] = None
__props__.__dict__["plan"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_hostname"] = None
__props__.__dict__["refresh_cluster"] = None
__props__.__dict__["router_profiles"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return OpenShiftManagedCluster(resource_name, opts=opts, __props__=__props__)
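    # Example (a sketch, not part of the generated file; all values are hypothetical
    # placeholders): adopt an existing cluster by its ARM resource id:
    #   OpenShiftManagedCluster.get(
    #       "imported-cluster",
    #       id="/subscriptions/<sub>/resourceGroups/<rg>/providers/"
    #          "Microsoft.ContainerService/openShiftManagedClusters/<name>")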
@property
@pulumi.getter(name="agentPoolProfiles")
def agent_pool_profiles(self) -> pulumi.Output[Optional[Sequence['outputs.OpenShiftManagedClusterAgentPoolProfileResponse']]]:
"""
Configuration of OpenShift cluster VMs.
"""
return pulumi.get(self, "agent_pool_profiles")
@property
@pulumi.getter(name="authProfile")
def auth_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterAuthProfileResponse']]:
"""
Configures OpenShift authentication.
"""
return pulumi.get(self, "auth_profile")
@property
@pulumi.getter(name="clusterVersion")
def cluster_version(self) -> pulumi.Output[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "cluster_version")
@property
@pulumi.getter
def fqdn(self) -> pulumi.Output[str]:
"""
Service generated FQDN for OpenShift API server loadbalancer internal hostname.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="masterPoolProfile")
def master_pool_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterMasterPoolProfileResponse']]:
"""
Configuration for OpenShift master VMs.
"""
return pulumi.get(self, "master_pool_profile")
@property
@pulumi.getter(name="monitorProfile")
def monitor_profile(self) -> pulumi.Output[Optional['outputs.OpenShiftManagedClusterMonitorProfileResponse']]:
"""
Configures Log Analytics integration.
"""
return pulumi.get(self, "monitor_profile")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> pulumi.Output[Optional['outputs.NetworkProfileResponse']]:
"""
Configuration for OpenShift networking.
"""
return pulumi.get(self, "network_profile")
@property
@pulumi.getter(name="openShiftVersion")
def open_shift_version(self) -> pulumi.Output[str]:
"""
Version of OpenShift specified when creating the cluster.
"""
return pulumi.get(self, "open_shift_version")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[Optional['outputs.PurchasePlanResponse']]:
"""
Define the resource plan as required by ARM for billing purposes
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment or provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicHostname")
def public_hostname(self) -> pulumi.Output[str]:
"""
Service generated FQDN or private IP for OpenShift API server.
"""
return pulumi.get(self, "public_hostname")
@property
@pulumi.getter(name="refreshCluster")
def refresh_cluster(self) -> pulumi.Output[Optional[bool]]:
"""
Allows node rotation
"""
return pulumi.get(self, "refresh_cluster")
@property
@pulumi.getter(name="routerProfiles")
def router_profiles(self) -> pulumi.Output[Optional[Sequence['outputs.OpenShiftRouterProfileResponse']]]:
"""
Configuration for OpenShift router(s).
"""
return pulumi.get(self, "router_profiles")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| 47.514451 | 856 | 0.679927 | 24,181 | 0.980576 | 0 | 0 | 18,975 | 0.769465 | 0 | 0 | 10,258 | 0.415977 |
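For context, a generated resource class like the one above is normally consumed from a Pulumi program. The sketch below is an assumption, not part of the generated file: the module path is inferred from the type token 'azure-native:containerservice/v20191027preview:OpenShiftManagedCluster', and every literal value is a placeholder; only resource_group_name and open_shift_version are required by the constructor checks shown above.
import pulumi
import pulumi_azure_native as azure_native

# Hypothetical program using the generated resource (all values are placeholders).
cluster = azure_native.containerservice.v20191027preview.OpenShiftManagedCluster(
    "example-osa",
    resource_group_name="example-rg",
    open_shift_version="v3.11",
    location="westeurope",
    tags={"env": "dev"},
)

# fqdn is one of the output-only properties defined on the class above.
pulumi.export("fqdn", cluster.fqdn)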
40686f7cd56545ec9981f33c3903dd74fd6b1048 | 326 | py | Python | django_drf_server/quiz/migrations/0017_remove_quiz_questions.py | pammalPrasanna/quizie | 3c03552c39ef3d7e613f5b613479df4ef8d44ac1 | ["MIT"] | null | null | null | django_drf_server/quiz/migrations/0017_remove_quiz_questions.py | pammalPrasanna/quizie | 3c03552c39ef3d7e613f5b613479df4ef8d44ac1 | ["MIT"] | null | null | null | django_drf_server/quiz/migrations/0017_remove_quiz_questions.py | pammalPrasanna/quizie | 3c03552c39ef3d7e613f5b613479df4ef8d44ac1 | ["MIT"] | null | null | null |
# Generated by Django 3.2.4 on 2021-06-17 02:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0016_auto_20210617_0724'),
]
operations = [
migrations.RemoveField(
model_name='quiz',
name='questions',
),
]
| 18.111111 | 47 | 0.588957 | 241 | 0.739264 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.291411 |
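For context, a RemoveField migration like the one above is what "python manage.py makemigrations" emits after the field is deleted from the model. A minimal sketch of the assumed model change (only the 'questions' field name is known from the migration; everything else is hypothetical):
from django.db import models

class Quiz(models.Model):
    title = models.CharField(max_length=100)            # hypothetical field
    # questions = models.ManyToManyField('Question')    # deleting this field
    #                                                    # yields migration 0017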
4069e772d72345dc8c5aa0533940bffe33f5921a | 18,348 | py | Python | main.py | khan-git/webRecipies | 4fa9f9bc3c9809f82c5c8fd94dbb604da3443dcb | ["MIT"] | null | null | null | main.py | khan-git/webRecipies | 4fa9f9bc3c9809f82c5c8fd94dbb604da3443dcb | ["MIT"] | null | null | null | main.py | khan-git/webRecipies | 4fa9f9bc3c9809f82c5c8fd94dbb604da3443dcb | ["MIT"] | null | null | null |
# -*- coding: iso-8859-1 -*-
import os
import shutil
import datetime
import sqlite3
from flask import Flask, request, session, render_template, g, redirect, url_for, abort, flash, make_response
from random import randint
import json
import urllib2
from json.decoder import JSONObject
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = '/tmp'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
DBBACKUPPATH = os.path.abspath('db_backup')
if os.path.exists(DBBACKUPPATH) == False:
os.mkdir(DBBACKUPPATH)
app = Flask(__name__)
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
app.config.from_object(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'recipes.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default',
UPLOAD_FOLDER='/tmp'
))
app.config['UPLOAD_FOLDER'] = '/tmp'
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
if os.path.exists(app.config['DATABASE']) == False:
cmd = 'sqlite3 recipes.db < database.sql'
os.system(cmd)
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
def init_db():
db = get_db()
with app.open_resource('database.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def queryDbFetchOne(query):
"""Query database, return one result"""
db = get_db()
cur = db.cursor()
cur.execute(query)
return cur.fetchone()
def queryDbFetchAll(query):
"""Query database, return one result"""
db = get_db()
cur = db.cursor()
cur.execute(query)
return cur.fetchall()
def getRecipe(recipeKey):
"""Get recipe data"""
return queryDbFetchOne('SELECT * FROM recipes WHERE key="%s"'%recipeKey)
def getIngredients(recipeKey):
"""Get all ingredients for a recipe"""
return queryDbFetchAll('SELECT * FROM recipeAmount WHERE recipeKey="%s"'%recipeKey)
def getNextKey():
"""Get next number for key"""
currentHighKey = queryDbFetchOne('SELECT key FROM recipes ORDER BY key DESC')
if currentHighKey is None:
print "IS none %s"%currentHighKey
currentHighKey = 0
else:
currentHighKey = int(currentHighKey[0])
return currentHighKey +1
def insertIntoDb(table, names, values):
"""Insert into database"""
if len(values) != len(names):
return None
query = 'INSERT INTO %s (%s) VALUES(%s)'%(table, ', '.join(names), ', '.join(values))
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
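# Example (hypothetical values): insertIntoDb('recipeTag', ['recipeKey', 'group'],
# ['7', '"soup"']) builds and commits
#     INSERT INTO recipeTag (recipeKey, group) VALUES(7, "soup")
# and returns the new row id, or None on failure. Every value must already be a
# string, with quotes included for SQL string literals.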
def doRawQuery(query):
"""Do a raw query"""
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
def updateDb(table, names, values, where):
"""Update row in table"""
if len(values) != len(names):
return None
query = 'UPDATE %s SET '%(table)
qPairs = []
for name, value in zip(names,values):
qPairs.append('%s=%s'%(name,value))
query += ', '.join(x for x in qPairs)
query += ' %s'%where
rowId = None
try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
finally:
return rowId
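# Example (hypothetical values): updateDb('recipes', ['title', 'portions'],
# ['"Pannkakor"', '4'], 'WHERE key=7') builds and commits
#     UPDATE recipes SET title="Pannkakor", portions=4 WHERE key=7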
@app.route('/prepdb')
def prepdb():
"""Prepare database from json file"""
f = open('recipes.json','r')
buff = f.read()
recipes = json.loads(buff)
for item in recipes:
recipeKey = getNextKey()
rowId = insertIntoDb('recipes', ['key', 'title','instructions', 'portions'],
[recipeKey, '"%s"'%item['title'], '"%s"'%item['instructions'], item['portions']])
for ingredient in item['ingredients']:
keys = ingredient.keys()
keys.insert(0, 'recipeKey')
values = ingredient.values()
values.insert(0, recipeKey)
rId = insertIntoDb('recipeAmount', keys, values)
for group in item['recipeTag']:
insertIntoDb('recipeTag', ['recipeKey', 'group'], [recipeKey, '"%s"'%group])
if 'fridge' in item:
insertIntoDb('fridge', ['recipeKey', 'portions'], [recipeKey, item['fridge']])
print " Fridge %d"%item['fridge']
else:
print "No fridge"
return index()
@app.cli.command('initdb')
def initdb_command():
"""Initializes the database."""
init_db()
print 'Initialized the database.'
@app.route('/help')
def help():
values = {'pageId': 'help',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('help.html', **values)
@app.route('/')
def index():
values = {'pageId': 'index',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('index.html', **values)
# return redirect('login', code=304)
@app.route('/login', methods=['GET','POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != 'admin' or request.form['password'] != 'admin':
error = 'Invalid Credentials. Please try again.'
else:
return redirect(url_for('favourite'), code=304)
values = {'pageId': 'index',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048),
'error': error
}
return render_template('login.html', **values)
@app.route('/editRecipe', methods=['GET'])
def editRecipe():
return newRecipe(request.args['recipeKey'])
@app.route('/deleteRecipe', methods=['GET'])
def deleteRecipe():
    # TODO: actually delete the recipe; return to the start page for now so the
    # view does not return None (which Flask treats as an error).
    if 'recipeKey' in request.args:
        pass
    return redirect(url_for('index'))
def deleteAmount(recipeKey):
    query = 'DELETE FROM recipeAmount WHERE recipeKey=%s'%recipeKey
    rowId = None
    try:
db = get_db()
cur = db.cursor()
cur = get_db().cursor()
cur.execute(query)
db.commit()
rowId = cur.lastrowid
except:
db.rollback()
msg = "error in delete operation"
print msg
finally:
return rowId
@app.route('/newRecipe')
def newRecipe(recipeKey=None):
if recipeKey is not None:
recipe = getRecipe(recipeKey)
ingredients = getIngredients(recipeKey)
else:
recipe = None
ingredients = None
entries = queryDbFetchAll('SELECT name FROM ingredients ')
measurements = queryDbFetchAll('SELECT short FROM measurements ')
values = {'ingredientsList': entries,
'measurements':measurements,
'recipe':recipe,
'ingredients':ingredients,
'pageId': 'newRecipe',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('newRecipe.html', **values)
@app.route('/error')
def errorHtml():
values = {'pageId': 'error',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('error.html', **values)
@app.route('/saveRecipe', methods=['POST'])
def saveRecipe():
# TODO add last update time
title = request.form['title']
names = ['title']
values = ['"%s"'%title]
if 'instructions' in request.form:
names.append('instructions')
values.append('"%s"'%request.form['instructions'])
if 'portions' in request.form:
names.append('portions')
values.append(request.form['portions'])
if 'recipeKey' in request.form:
recipeKey = request.form['recipeKey']
updateDb('recipes', names, values, 'WHERE key=%s'%recipeKey)
else:
recipeKey = getNextKey()
names.insert(0, 'key')
values.insert(0, '%d'%recipeKey)
if insertIntoDb('recipes', names, values) is None:
return json.dumps({'redirect':'false', 'result': 'Error creating recipe'})
amount = request.form.getlist('amount')
measurement = request.form.getlist('measurement')
ingredients = request.form.getlist('ingredient')
deleteAmount(recipeKey)
for a,m,i in zip(amount, measurement, ingredients):
names = ['recipeKey', 'ingredient', 'amount', 'measurement']
values = [str(recipeKey), '"%s"'%i, str(a), '"%s"'%m]
if insertIntoDb('recipeAmount', names, values) is None:
return json.dumps({'redirect':'false', 'result': 'Error creating recipe'})
return json.dumps({'redirect':True, 'url': '/show/recipe?recipe=%s'%recipeKey})
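# The /saveRecipe form above must carry 'title', may carry 'instructions',
# 'portions' and 'recipeKey', and sends three parallel lists ('amount',
# 'measurement', 'ingredient') with one entry per ingredient row.
# Hypothetical payload: title=Pancakes, portions=4, amount=[3, 2],
# measurement=[dl, st], ingredient=[flour, eggs].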
@app.route('/show/recipe', methods=['GET'])
def showRecipe():
recipeKey = request.args.get('recipe')
recipe = getRecipe(recipeKey)
return displayRecipe(recipe)
def displayRecipe(recipe):
values = {'key':recipe['key'],
'title': recipe['title'],
'instructions': recipe['instructions'],
'portions': recipe['portions'],
'ingredients': getIngredients(recipe['key']),
'pageId': 'displayRecipe',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('displayRecipe_template.html', **values)
@app.route('/randomRecipe', methods=['GET'])
def randomRecipe():
recipes = queryDbFetchAll('SELECT * FROM recipes ORDER BY RANDOM() LIMIT 4')
    return render_template('listRecipes.html', header='Förslag:', lastRecipes=recipes)
@app.route('/menuSuggestion', methods=['GET'])
def menuSuggestion():
recipes = queryDbFetchAll('SELECT * FROM recipes ORDER BY RANDOM() LIMIT 4')
if 'update' in request.args:
return render_template('onlyList.html', lastRecipes=recipes)
values = {'pagetitle':'Receptakuten',
              'title': 'Förslag:',
'lastRecipes': recipes,
'refresh': 'true',
'pageId': 'menuSuggestion',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('listRecipes.html', **values)
@app.route('/ajax/search', methods=['GET'])
def searchAjax():
if request.method == 'GET':
patterns = request.args.getlist('searchPatterns[]')
query = ''
for p in patterns:
if len(query) > 0:
query = '%s or '%query
query += 'title LIKE "%%%s%%" or instructions LIKE "%%%s%%"'%(p, p)
query = 'SELECT key, title FROM recipes WHERE %s LIMIT 10'%query
results = queryDbFetchAll(query)
t = []
for p in results:
h = {}
for k in p.keys():
h[k] = p[k]
t.append(h)
return json.dumps(t)
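# Example (hypothetical input): GET /ajax/search?searchPatterns[]=soppa makes the
# handler above run roughly
#     SELECT key, title FROM recipes WHERE title LIKE "%soppa%" or
#     instructions LIKE "%soppa%" LIMIT 10
# and return the matching rows as a JSON list of {key, title} objects.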
@app.route('/ajax/searchIngredient', methods=['GET'])
def searchIngredient():
if request.method == 'GET':
patterns = request.args.getlist('searchPatterns[]')
print patterns
query = ''
for p in patterns:
if len(query) > 0:
query = '%s or '%query
query += 'ingredient LIKE "%%%s%%"'%(p)
query = 'SELECT DISTINCT ingredient FROM recipeAmount WHERE %s'%query
print query
results = queryDbFetchAll(query)
t = []
for p in results:
h = {}
for k in p.keys():
h[k] = p[k]
t.append(h)
return json.dumps(t)
@app.route('/search')
def search():
values = {'pageId': 'search',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('search.html', **values)
def getFridgeJSON():
fridgeContent = queryDbFetchAll('SELECT key, title, fridge.portions AS portions FROM recipes INNER JOIN fridge ON recipes.key = fridge.recipeKey')
fridgeJson = []
for row in fridgeContent:
rowJson = {}
for key in row.keys():
rowJson[key] = row[key]
fridgeJson.append(rowJson)
return json.dumps(fridgeJson)
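# getFridgeJSON serialises the fridge/recipes join to something like
# (hypothetical data): [{"key": 7, "title": "Pannkakor", "portions": 4}]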
@app.route('/fromTheFridge')
def fromTheFridge():
values = {'pageId': 'fromTheFridge',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('whatsinthefridge.html', **values)
# Update fridge content
@app.route('/ajax/updateFridge', methods=['GET','POST'])
def updateFridge():
if request.method == 'POST':
recipesJson = request.form.getlist('recipes')
recipes = json.loads(recipesJson[0])
keys = []
for item in recipes:
keys.append(item['key'])
queryUpdate = 'UPDATE fridge SET portions=%d WHERE recipeKey=%d'%(item['portions'], item['key'])
queryInsert = 'INSERT INTO fridge (recipeKey, portions) SELECT %d,%d WHERE(Select Changes() = 0)'%(item['key'], item['portions'])
doRawQuery(queryUpdate)
doRawQuery(queryInsert)
currentKeys = queryDbFetchAll('SELECT recipeKey FROM fridge ORDER BY recipeKey')
for key in currentKeys:
if key['recipeKey'] not in keys:
deleteQuery = 'DELETE FROM fridge WHERE recipeKey=%s'%key['recipeKey']
doRawQuery(deleteQuery)
return getFridgeJSON()
@app.route('/groceryList')
def groceryList():
recipes = queryDbFetchAll('SELECT key, title, portions FROM recipes ORDER BY title')
ingredients = {}
for recipe in recipes:
ingredients[recipe['key']] = getIngredients(recipe['key'])
values = {'pageId': 'groceryList',
'recipes': recipes,
'ingredients': ingredients,
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('groceryList.html', **values)
@app.route('/favourite')
def favourite():
"""Show favourite recipes"""
values = {'pageId': 'favouritePage',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('favourite.html', **values)
@app.route('/ajax/getRecipesJson', methods=['GET','POST'])
def getRecipesJson():
if request.method == 'POST':
recipeKeys = request.form.getlist('recipe')
query = 'SELECT * FROM recipes where '
qyeryKeys = []
for recipes in recipeKeys:
jsonKeys = json.loads(recipes)
for key in jsonKeys:
qyeryKeys.append('key=%s'%key['recipeKey'])
query += ' OR '.join(qyeryKeys)
recipeList = queryDbFetchAll(query)
jsonReply = []
for rowRecipe in recipeList:
tmpJson = {}
for key in rowRecipe.keys():
tmpJson[key] = rowRecipe[key]
ingredientsJson = []
for row in getIngredients(rowRecipe['key']):
tmpIngredient = {}
for key in row.keys():
if key == 'recipeKey':
continue
tmpIngredient[key] = row[key]
ingredientsJson.append(tmpIngredient)
tmpJson['ingredients'] = ingredientsJson
jsonReply.append(tmpJson)
return json.dumps(jsonReply)
recipes = queryDbFetchAll('SELECT key, title FROM recipes')
rows = []
for i in recipes:
rows.append(dict(i))
return json.dumps(rows)
@app.route('/manifest.json')
def manifestJSON():
return url_for('static', filename='manifest.json')
@app.route('/manifest.appcache')
def manifest():
res = make_response(render_template('manifest.appcache'), 200)
res.headers["Content-Type"] = "text/cache-manifest"
return res
@app.route('/admin/restore', methods = ['POST'])
def dorestore():
versionF = os.path.abspath(os.path.join(DBBACKUPPATH, request.form.get('version')))
if os.path.exists(versionF):
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
name = '%s_bfrestore.sql'%now
dobackup(name)
tables = queryDbFetchAll('SELECT name FROM sqlite_master WHERE type = "table"')
for tab in tables:
doRawQuery('DROP TABLE %s'%tab['name'])
cmd = 'sqlite3 recipes.db < %s'%versionF
os.system(cmd)
return getstatus()
@app.route('/admin/backup')
def adminbackup():
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
dobackup(now+'.sql')
return getstatus()
def dobackup(name):
dbF = open(os.path.join(DBBACKUPPATH, name), 'w')
con = get_db()
dbF.write('\n'.join(con.iterdump()).encode('utf8'))
dbF.close()
@app.route('/admin/status')
def getstatus():
status = {}
status['num_of_recipes'] = queryDbFetchOne('SELECT count(*) as rows FROM recipes')['rows']
status['num_of_fridge'] = queryDbFetchOne('SELECT count(*) as rows FROM fridge')['rows']
status['num_of_ingredients'] = queryDbFetchOne('SELECT count(*) as rows FROM (SELECT DISTINCT ingredient FROM recipeAmount)')['rows']
status['backups'] = sorted(os.listdir(DBBACKUPPATH), reverse=True)
return json.dumps(status, sort_keys=True, indent=4, separators=(',', ': '))
@app.route('/admin')
def adminpage():
values = {'pageId': 'adminPage',
'popupMenuId': 'popupMenuId%d'%randint(1, 1048)
}
return render_template('admin.html', **values)
if __name__ == "__main__":
# import logging
# file_handler = RotatingFileHandler('/tmp/receptakuten.log', bakupCount=5)
# file_handler.setLevel(logging.WARNING)
# app.logger.addHandler(file_handler)
app.run(host="0.0.0.0", debug=True)
# app.run(debug=True)
| 33 | 150 | 0.601373 | 0 | 0 | 0 | 0 | 12,285 | 0.669555 | 0 | 0 | 5,343 | 0.291203 |
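As a quick smoke test of the application above (a sketch under assumptions: the file is importable as main, and recipes.db plus database.sql are available in the working directory so connect_db() can build the schema):
from main import app

client = app.test_client()
resp = client.get('/ajax/search?searchPatterns[]=soppa')
print resp.status_code, resp.data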