max_stars_repo_path (stringlengths 4-286) | max_stars_repo_name (stringlengths 5-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.03M) | content_cleaned (stringlengths 6-1.03M) | language (stringclasses 111 values) | language_score (float64 0.03-1) | comments (stringlengths 0-556k) | edu_score (float64 0.32-5.03) | edu_int_score (int64 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
aspath_graph/utils.py | coxley/aspath_graph | 28 | 6630351 | <gh_stars>10-100
'''
utils.py
--------
Some common utilities
'''
from __future__ import print_function
import struct
def dedupe(items):
    # This function is required because set() doesn't maintain list order,
    # which is important when dealing with paths
seen = set()
for item in items:
if item not in seen:
yield item
seen.add(item)
def asn_to_label(asn, label_map={}, asdot=False): # -> str
'''Return label mapped to an ASN
    We follow a convention where, in ASDOT notation, the value after the dot is
    metadata and the value before it is the location. Arguments:
asn: ASN as str
label_map: dict of asn -> human label
asdot: Whether to convert to ASDOT or not
'''
location = asn
meta = 0
# We separate the AS into location and meta. Not all people may follow
# a convention like this which is why we can change suffix to '' below
if asdot:
if bytesize(int(asn)) > 2:
dot = plain_to_dot(int(asn))
location, meta = dot.split('.')
# If label not found, default back to location
location_name = label_map.get(location, location)
# Rack switch ASNs are assumed to be (racknumber * 10) + 1
# Anything else is considered anycast
if all([meta, int(meta) % 10 == 1]):
        suffix = '-R%d' % (int(meta) // 10)
elif meta:
suffix = '-CAST-%s' % meta
else:
suffix = ''
return location_name + suffix
def plain_to_dot(asn): # -> str
'''Take ASPLAIN and return ASDOT notation
asn: int
'''
barray = struct.pack('>I', asn)
return '%d.%d' % struct.unpack('>HH', barray)
def dot_to_plain(asn): # -> int
'''Take ASDOT and return ASPLAIN notation
asn: string - two nums separated by period (.)
'''
a1, a2 = asn.split('.')
barray = struct.pack('>HH', int(a1), int(a2))
return struct.unpack('>I', barray)[0]
def bytesize(i): # -> int
'''Return bytesize'''
return (i.bit_length() + 7) // 8
def link_paths(aspath, ownas=''): # -> Dict[str, Any]
'''Link AS PATH into pairs
aspath: ASPath string
ownas: AS to prepend to PATH
'''
# Remove the AS PATH origin keys
path = aspath.split()
path = [p for p in path if p not in ['?', 'I', 'E']]
# Add our own AS to the beginning of the PATH
path.insert(0, ownas)
# Eliminate prepends, but still provide a list for .__getitem__ for zipping
path = list(dedupe(path))
return {'nodes': path, 'pairs': zip(path, path[1:])}
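# --- Illustrative usage (editor's addition, not part of the original file). ---
# The ASNs, the label_map entry, and the AS PATH below are made-up sample values.
if __name__ == '__main__':
    print(list(dedupe(['65001', '65002', '65001', '65003'])))   # ['65001', '65002', '65003']
    print(plain_to_dot(327691))                                  # '5.11' (65536 * 5 + 11)
    print(dot_to_plain('5.11'))                                  # 327691
    print(asn_to_label('327691', {'5': 'NYC'}, asdot=True))      # 'NYC-R1' (rack 1)
    linked = link_paths('65001 65002 65002 65003 I', ownas='65000')
    print(linked['nodes'], list(linked['pairs']))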
| '''
utils.py
--------
Some common utilities
'''
from __future__ import print_function
import struct
def dedupe(items):
    # This function is required because set() doesn't maintain list order,
    # which is important when dealing with paths
seen = set()
for item in items:
if item not in seen:
yield item
seen.add(item)
def asn_to_label(asn, label_map={}, asdot=False): # -> str
'''Return label mapped to an ASN
    We follow a convention where, in ASDOT notation, the value after the dot is
    metadata and the value before it is the location. Arguments:
asn: ASN as str
label_map: dict of asn -> human label
asdot: Whether to convert to ASDOT or not
'''
location = asn
meta = 0
# We separate the AS into location and meta. Not all people may follow
# a convention like this which is why we can change suffix to '' below
if asdot:
if bytesize(int(asn)) > 2:
dot = plain_to_dot(int(asn))
location, meta = dot.split('.')
# If label not found, default back to location
location_name = label_map.get(location, location)
# Rack switch ASNs are assumed to be (racknumber * 10) + 1
# Anything else is considered anycast
if all([meta, int(meta) % 10 == 1]):
        suffix = '-R%d' % (int(meta) // 10)
elif meta:
suffix = '-CAST-%s' % meta
else:
suffix = ''
return location_name + suffix
def plain_to_dot(asn): # -> str
'''Take ASPLAIN and return ASDOT notation
asn: int
'''
barray = struct.pack('>I', asn)
return '%d.%d' % struct.unpack('>HH', barray)
def dot_to_plain(asn): # -> int
'''Take ASDOT and return ASPLAIN notation
asn: string - two nums separated by period (.)
'''
a1, a2 = asn.split('.')
barray = struct.pack('>HH', int(a1), int(a2))
return struct.unpack('>I', barray)[0]
def bytesize(i): # -> int
'''Return bytesize'''
return (i.bit_length() + 7) // 8
def link_paths(aspath, ownas=''): # -> Dict[str, Any]
'''Link AS PATH into pairs
aspath: ASPath string
ownas: AS to prepend to PATH
'''
# Remove the AS PATH origin keys
path = aspath.split()
path = [p for p in path if p not in ['?', 'I', 'E']]
# Add our own AS to the beginning of the PATH
path.insert(0, ownas)
# Eliminate prepends, but still provide a list for .__getitem__ for zipping
path = list(dedupe(path))
return {'nodes': path, 'pairs': zip(path, path[1:])} | en | 0.815821 | utils.py -------- Some common utilities # This function is required because set() doesn't mantain list order which # is important when dealing with paths # -> str Return label mapped to an ASN We follow a trend where if ASDOT notation, the value after the dot is metadata and before is the location. In this case let's name that asn: ASN as str label_map: dict of asn -> human label asdot: Whether to convert to ASDOT or not # We separate the AS into location and meta. Not all people may follow # a convention like this which is why we can change suffix to '' below # If label not found, default back to location # Rack switch ASNs are assumed to be (racknumber * 10) + 1 # Anything else is considered anycast # -> str Take ASPLAIN and return ASDOT notation asn: int # -> int Take ASDOT and return ASPLAIN notation asn: string - two nums separated by period (.) # -> int Return bytesize # -> Dict[str, Any] Link AS PATH into pairs aspath: ASPath string ownas: AS to prepend to PATH # Remove the AS PATH origin keys # Add our own AS to the beginning of the PATH # Eliminate prepends, but still provide a list for .__getitem__ for zipping | 3.098931 | 3 |
server/chalicelib/secrets.py | friendchris/t-performance-dash | 0 | 6630352 | import os
if 'MBTA_V2_API_KEY' in os.environ:
MBTA_V2_API_KEY = os.environ['MBTA_V2_API_KEY']
MBTA_V3_API_KEY = os.environ['MBTA_V3_API_KEY']
else:
MBTA_V2_API_KEY = ''
MBTA_V3_API_KEY = ''
| import os
if 'MBTA_V2_API_KEY' in os.environ:
MBTA_V2_API_KEY = os.environ['MBTA_V2_API_KEY']
MBTA_V3_API_KEY = os.environ['MBTA_V3_API_KEY']
else:
MBTA_V2_API_KEY = ''
MBTA_V3_API_KEY = ''
| none | 1 | 1.605078 | 2 |
|
pyy1/.pycharm_helpers/python_stubs/-1550516950/_tracemalloc.py | pyy1988/pyy_test1 | 0 | 6630353 | # encoding: utf-8
# module _tracemalloc
# from (built-in)
# by generator 1.145
""" Debug module to trace memory blocks allocated by Python. """
# no imports
# functions
def clear_traces(): # real signature unknown; restored from __doc__
"""
clear_traces()
Clear traces of memory blocks allocated by Python.
"""
pass
def get_traceback_limit(): # real signature unknown; restored from __doc__
"""
get_traceback_limit() -> int
Get the maximum number of frames stored in the traceback
of a trace.
By default, a trace of an allocated memory block only stores
the most recent frame: the limit is 1.
"""
return 0
def get_traced_memory(): # real signature unknown; restored from __doc__
"""
get_traced_memory() -> (int, int)
Get the current size and peak size of memory blocks traced
by the tracemalloc module as a tuple: (current: int, peak: int).
"""
pass
def get_tracemalloc_memory(): # real signature unknown; restored from __doc__
"""
get_tracemalloc_memory() -> int
Get the memory usage in bytes of the tracemalloc module
used internally to trace memory allocations.
"""
return 0
def is_tracing(): # real signature unknown; restored from __doc__
"""
is_tracing()->bool
True if the tracemalloc module is tracing Python memory allocations,
False otherwise.
"""
return False
def start(nframe=1): # real signature unknown; restored from __doc__
"""
start(nframe: int=1)
Start tracing Python memory allocations. Set also the maximum number
of frames stored in the traceback of a trace to nframe.
"""
pass
def stop(): # real signature unknown; restored from __doc__
"""
stop()
Stop tracing Python memory allocations and clear traces
of memory blocks allocated by Python.
"""
pass
def _get_object_traceback(obj): # real signature unknown; restored from __doc__
"""
_get_object_traceback(obj)
Get the traceback where the Python object obj was allocated.
Return a tuple of (filename: str, lineno: int) tuples.
Return None if the tracemalloc module is disabled or did not
trace the allocation of the object.
"""
pass
def _get_traces(): # real signature unknown; restored from __doc__
"""
_get_traces() -> list
Get traces of all memory blocks allocated by Python.
Return a list of (size: int, traceback: tuple) tuples.
traceback is a tuple of (filename: str, lineno: int) tuples.
Return an empty list if the tracemalloc module is disabled.
"""
return []
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
__spec__ = None # (!) real value is ''
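# --- Illustrative usage (editor's addition, not part of the generated stub). ---
# The functions documented above are normally reached through the high-level
# `tracemalloc` wrapper in the standard library rather than via `_tracemalloc` directly.
if __name__ == '__main__':
    import tracemalloc
    tracemalloc.start(5)                               # keep up to 5 frames per traceback
    blocks = [bytearray(1000) for _ in range(100)]     # allocate some traceable memory
    current, peak = tracemalloc.get_traced_memory()
    print('current=%d bytes, peak=%d bytes' % (current, peak))
    tracemalloc.stop()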
| # encoding: utf-8
# module _tracemalloc
# from (built-in)
# by generator 1.145
""" Debug module to trace memory blocks allocated by Python. """
# no imports
# functions
def clear_traces(): # real signature unknown; restored from __doc__
"""
clear_traces()
Clear traces of memory blocks allocated by Python.
"""
pass
def get_traceback_limit(): # real signature unknown; restored from __doc__
"""
get_traceback_limit() -> int
Get the maximum number of frames stored in the traceback
of a trace.
By default, a trace of an allocated memory block only stores
the most recent frame: the limit is 1.
"""
return 0
def get_traced_memory(): # real signature unknown; restored from __doc__
"""
get_traced_memory() -> (int, int)
Get the current size and peak size of memory blocks traced
by the tracemalloc module as a tuple: (current: int, peak: int).
"""
pass
def get_tracemalloc_memory(): # real signature unknown; restored from __doc__
"""
get_tracemalloc_memory() -> int
Get the memory usage in bytes of the tracemalloc module
used internally to trace memory allocations.
"""
return 0
def is_tracing(): # real signature unknown; restored from __doc__
"""
is_tracing()->bool
True if the tracemalloc module is tracing Python memory allocations,
False otherwise.
"""
return False
def start(nframe=1): # real signature unknown; restored from __doc__
"""
start(nframe: int=1)
Start tracing Python memory allocations. Set also the maximum number
of frames stored in the traceback of a trace to nframe.
"""
pass
def stop(): # real signature unknown; restored from __doc__
"""
stop()
Stop tracing Python memory allocations and clear traces
of memory blocks allocated by Python.
"""
pass
def _get_object_traceback(obj): # real signature unknown; restored from __doc__
"""
_get_object_traceback(obj)
Get the traceback where the Python object obj was allocated.
Return a tuple of (filename: str, lineno: int) tuples.
Return None if the tracemalloc module is disabled or did not
trace the allocation of the object.
"""
pass
def _get_traces(): # real signature unknown; restored from __doc__
"""
_get_traces() -> list
Get traces of all memory blocks allocated by Python.
Return a list of (size: int, traceback: tuple) tuples.
traceback is a tuple of (filename: str, lineno: int) tuples.
Return an empty list if the tracemalloc module is disabled.
"""
return []
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
__spec__ = None # (!) real value is ''
| en | 0.762851 | # encoding: utf-8 # module _tracemalloc # from (built-in) # by generator 1.145 Debug module to trace memory blocks allocated by Python. # no imports # functions # real signature unknown; restored from __doc__ clear_traces() Clear traces of memory blocks allocated by Python. # real signature unknown; restored from __doc__ get_traceback_limit() -> int Get the maximum number of frames stored in the traceback of a trace. By default, a trace of an allocated memory block only stores the most recent frame: the limit is 1. # real signature unknown; restored from __doc__ get_traced_memory() -> (int, int) Get the current size and peak size of memory blocks traced by the tracemalloc module as a tuple: (current: int, peak: int). # real signature unknown; restored from __doc__ get_tracemalloc_memory() -> int Get the memory usage in bytes of the tracemalloc module used internally to trace memory allocations. # real signature unknown; restored from __doc__ is_tracing()->bool True if the tracemalloc module is tracing Python memory allocations, False otherwise. # real signature unknown; restored from __doc__ start(nframe: int=1) Start tracing Python memory allocations. Set also the maximum number of frames stored in the traceback of a trace to nframe. # real signature unknown; restored from __doc__ stop() Stop tracing Python memory allocations and clear traces of memory blocks allocated by Python. # real signature unknown; restored from __doc__ _get_object_traceback(obj) Get the traceback where the Python object obj was allocated. Return a tuple of (filename: str, lineno: int) tuples. Return None if the tracemalloc module is disabled or did not trace the allocation of the object. # real signature unknown; restored from __doc__ _get_traces() -> list Get traces of all memory blocks allocated by Python. Return a list of (size: int, traceback: tuple) tuples. traceback is a tuple of (filename: str, lineno: int) tuples. Return an empty list if the tracemalloc module is disabled. # classes Meta path import for built-in modules. All methods are either class or static methods to avoid the need to instantiate the class. # real signature unknown Create a built-in module # real signature unknown Exec a built-in module # real signature unknown Find the built-in module. If 'path' is ever specified then the search is considered a failure. This method is deprecated. Use find_spec() instead. # real signature unknown # real signature unknown Return None as built-in modules do not have code objects. # real signature unknown Return None as built-in modules do not have source code. # real signature unknown Return False as built-in modules are never packages. # real signature unknown Load the specified module into sys.modules and return it. This method is deprecated. Use loader.exec_module instead. # reliably restored by inspect Return repr for the module. The method is deprecated. The import machinery does the job itself. # real signature unknown # default list of weak references to the object (if defined) # (!) real value is '' # variables with complex values # (!) real value is '' | 2.639787 | 3 |
src/commercetools/types/_base.py | mikedingjan/commercetools-python-sdk | 0 | 6630354 | <filename>src/commercetools/types/_base.py
# DO NOT EDIT! This file is automatically generated
import datetime
import typing
import attr
if typing.TYPE_CHECKING:
from ._common import Resource
__all__ = ["PagedQueryResponse", "Update", "UpdateAction"]
@attr.s(auto_attribs=True, init=False, repr=False)
class PagedQueryResponse:
"Corresponding marshmallow schema is :class:`commercetools.schemas.PagedQueryResponseSchema`."
#: :class:`int`
count: typing.Optional[int]
#: Optional :class:`int`
total: typing.Optional[int]
#: :class:`int`
offset: typing.Optional[int]
#: List of :class:`commercetools.types.Resource`
results: typing.Optional[typing.Sequence["Resource"]]
def __init__(
self,
*,
count: typing.Optional[int] = None,
total: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
results: typing.Optional[typing.Sequence["Resource"]] = None
) -> None:
self.count = count
self.total = total
self.offset = offset
self.results = results
def __repr__(self) -> str:
return "PagedQueryResponse(count=%r, total=%r, offset=%r, results=%r)" % (
self.count,
self.total,
self.offset,
self.results,
)
@attr.s(auto_attribs=True, init=False, repr=False)
class Update:
"Corresponding marshmallow schema is :class:`commercetools.schemas.UpdateSchema`."
#: :class:`int`
version: typing.Optional[int]
#: :class:`list`
actions: typing.Optional[list]
def __init__(
self,
*,
version: typing.Optional[int] = None,
actions: typing.Optional[list] = None
) -> None:
self.version = version
self.actions = actions
def __repr__(self) -> str:
return "Update(version=%r, actions=%r)" % (self.version, self.actions)
@attr.s(auto_attribs=True, init=False, repr=False)
class UpdateAction:
"Corresponding marshmallow schema is :class:`commercetools.schemas.UpdateActionSchema`."
#: :class:`str`
action: typing.Optional[str]
def __init__(self, *, action: typing.Optional[str] = None) -> None:
self.action = action
def __repr__(self) -> str:
return "UpdateAction(action=%r)" % (self.action,)
| <filename>src/commercetools/types/_base.py
# DO NOT EDIT! This file is automatically generated
import datetime
import typing
import attr
if typing.TYPE_CHECKING:
from ._common import Resource
__all__ = ["PagedQueryResponse", "Update", "UpdateAction"]
@attr.s(auto_attribs=True, init=False, repr=False)
class PagedQueryResponse:
"Corresponding marshmallow schema is :class:`commercetools.schemas.PagedQueryResponseSchema`."
#: :class:`int`
count: typing.Optional[int]
#: Optional :class:`int`
total: typing.Optional[int]
#: :class:`int`
offset: typing.Optional[int]
#: List of :class:`commercetools.types.Resource`
results: typing.Optional[typing.Sequence["Resource"]]
def __init__(
self,
*,
count: typing.Optional[int] = None,
total: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
results: typing.Optional[typing.Sequence["Resource"]] = None
) -> None:
self.count = count
self.total = total
self.offset = offset
self.results = results
def __repr__(self) -> str:
return "PagedQueryResponse(count=%r, total=%r, offset=%r, results=%r)" % (
self.count,
self.total,
self.offset,
self.results,
)
@attr.s(auto_attribs=True, init=False, repr=False)
class Update:
"Corresponding marshmallow schema is :class:`commercetools.schemas.UpdateSchema`."
#: :class:`int`
version: typing.Optional[int]
#: :class:`list`
actions: typing.Optional[list]
def __init__(
self,
*,
version: typing.Optional[int] = None,
actions: typing.Optional[list] = None
) -> None:
self.version = version
self.actions = actions
def __repr__(self) -> str:
return "Update(version=%r, actions=%r)" % (self.version, self.actions)
@attr.s(auto_attribs=True, init=False, repr=False)
class UpdateAction:
"Corresponding marshmallow schema is :class:`commercetools.schemas.UpdateActionSchema`."
#: :class:`str`
action: typing.Optional[str]
def __init__(self, *, action: typing.Optional[str] = None) -> None:
self.action = action
def __repr__(self) -> str:
return "UpdateAction(action=%r)" % (self.action,)
| en | 0.305237 | # DO NOT EDIT! This file is automatically generated #: :class:`int` #: Optional :class:`int` #: :class:`int` #: List of :class:`commercetools.types.Resource` #: :class:`int` #: :class:`list` #: :class:`str` | 1.862405 | 2 |
Admission Counselling For Direct Second Year/Web-Application/AdmissionDirectSecondYear/TopMaharashtraCollegeList/views.py | atharvaagrawal/direct-second-year-admission-analysis | 0 | 6630355 | <reponame>atharvaagrawal/direct-second-year-admission-analysis<filename>Admission Counselling For Direct Second Year/Web-Application/AdmissionDirectSecondYear/TopMaharashtraCollegeList/views.py
from django.shortcuts import render
from . models import TopMaharashtraCollegeListModel
from django.db import connection
# Create your views here.
def top_college_of_maharashtra(request):
cursor = connection.cursor()
cursor.execute('select distinct college_name from allocated2019')
row = cursor.fetchall()
college_list = [item for i in row for item in i]
cursor.execute('select distinct category from allocated2019')
row = cursor.fetchall()
category_list = [item for i in row for item in i]
category_get = ""
college_get = ""
result = zip()
if request.method == "POST":
if request.POST.get("Vise"):
category_get = request.POST['category']
college_get = request.POST['college']
if college_get != "Select College Name":
result = TopMaharashtraCollegeListModel.objects.raw("select * from allocated2019 where college_name = %s order by category ",(college_get,))
elif category_get != "Select Category":
result = TopMaharashtraCollegeListModel.objects.raw("select * from allocated2019 where category = %s order by college_name ",(category_get,))
if request.POST.get("All"):
result = TopMaharashtraCollegeListModel.objects.raw("select * from allocated2019 order by college_name")
return render(request,'top_college_of_maharashtra.html',{'college_list':college_list,'category_list':category_list,'result':result})
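# --- Illustrative wiring (editor's addition): a hypothetical urls.py entry for the view
# above; the route path and name are assumptions, not taken from the project. ---
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('top-colleges/', views.top_college_of_maharashtra, name='top_college_of_maharashtra'),
# ]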
| Admission Counselling For Direct Second Year/Web-Application/AdmissionDirectSecondYear/TopMaharashtraCollegeList/views.py
from django.shortcuts import render
from . models import TopMaharashtraCollegeListModel
from django.db import connection
# Create your views here.
def top_college_of_maharashtra(request):
cursor = connection.cursor()
cursor.execute('select distinct college_name from allocated2019')
row = cursor.fetchall()
college_list = [item for i in row for item in i]
cursor.execute('select distinct category from allocated2019')
row = cursor.fetchall()
category_list = [item for i in row for item in i]
category_get = ""
college_get = ""
result = zip()
if request.method == "POST":
if request.POST.get("Vise"):
category_get = request.POST['category']
college_get = request.POST['college']
if college_get != "Select College Name":
result = TopMaharashtraCollegeListModel.objects.raw("select * from allocated2019 where college_name = %s order by category ",(college_get,))
elif category_get != "Select Category":
result = TopMaharashtraCollegeListModel.objects.raw("select * from allocated2019 where category = %s order by college_name ",(category_get,))
if request.POST.get("All"):
result = TopMaharashtraCollegeListModel.objects.raw("select * from allocated2019 order by college_name")
return render(request,'top_college_of_maharashtra.html',{'college_list':college_list,'category_list':category_list,'result':result}) | en | 0.968116 | # Create your views here. | 2.817563 | 3 |
c10_design/main.py | yonoho/pyalgorithm | 0 | 6630356 | from typing import Dict, List, Tuple
def huffman_code(weighted_charset: List[Tuple[str, int]]) -> Dict[str, str]:
weighted_charset.sort(key=lambda x: x[1])
trees = []
for char, weight in weighted_charset:
trees.append({'v': char, 'w': weight})
while len(trees) > 1:
new_tree = {'l': trees[0], 'r': trees[1], 'w': trees[0]['w'] + trees[1]['w']}
trees = trees[2:] + [new_tree]
trees.sort(key=lambda x: x['w'])
return scan_code_tree(trees[0])
def scan_code_tree(tree: dict, parent_codes: List[str] = []):
codes = {}
if tree.get('l'):
codes.update(scan_code_tree(tree['l'], parent_codes + ['0']))
if tree.get('r'):
codes.update(scan_code_tree(tree['r'], parent_codes + ['1']))
if tree.get('v'):
codes[tree['v']] = ''.join(parent_codes)
return codes
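# --- Illustrative usage (editor's addition). The character weights are made-up counts. ---
if __name__ == '__main__':
    codes = huffman_code([('a', 45), ('b', 13), ('c', 12), ('d', 16), ('e', 9), ('f', 5)])
    print(codes)  # heavier characters typically receive shorter prefix-free codes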
| from typing import Dict, List, Tuple
def huffman_code(weighted_charset: List[Tuple[str, int]]) -> Dict[str, str]:
weighted_charset.sort(key=lambda x: x[1])
trees = []
for char, weight in weighted_charset:
trees.append({'v': char, 'w': weight})
while len(trees) > 1:
new_tree = {'l': trees[0], 'r': trees[1], 'w': trees[0]['w'] + trees[1]['w']}
trees = trees[2:] + [new_tree]
trees.sort(key=lambda x: x['w'])
return scan_code_tree(trees[0])
def scan_code_tree(tree: dict, parent_codes: List[str] = []):
codes = {}
if tree.get('l'):
codes.update(scan_code_tree(tree['l'], parent_codes + ['0']))
if tree.get('r'):
codes.update(scan_code_tree(tree['r'], parent_codes + ['1']))
if tree.get('v'):
codes[tree['v']] = ''.join(parent_codes)
return codes
| none | 1 | 3.435054 | 3 |
|
sdk/python/pulumi_aws/ssm/activation.py | Otanikotani/pulumi-aws | 0 | 6630357 | <reponame>Otanikotani/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['Activation']
class Activation(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
expiration_date: Optional[pulumi.Input[str]] = None,
iam_role: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
registration_limit: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Registers an on-premises server or virtual machine with Amazon EC2 so that it can be managed using Run Command.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_role = aws.iam.Role("testRole", assume_role_policy=\"\"\" {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {"Service": "ssm.amazonaws.com"},
"Action": "sts:AssumeRole"
}
}
\"\"\")
test_attach = aws.iam.RolePolicyAttachment("testAttach",
role=test_role.name,
policy_arn="arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore")
foo = aws.ssm.Activation("foo",
description="Test",
iam_role=test_role.id,
registration_limit=5,
opts=ResourceOptions(depends_on=[test_attach]))
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the resource that you want to register.
:param pulumi.Input[str] expiration_date: UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[str] iam_role: The IAM Role to attach to the managed instance.
:param pulumi.Input[str] name: The default name of the registered managed instance.
:param pulumi.Input[int] registration_limit: The maximum number of managed instances you want to register. The default value is 1 instance.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the object.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['expiration_date'] = expiration_date
if iam_role is None:
raise TypeError("Missing required property 'iam_role'")
__props__['iam_role'] = iam_role
__props__['name'] = name
__props__['registration_limit'] = registration_limit
__props__['tags'] = tags
__props__['activation_code'] = None
__props__['expired'] = None
__props__['registration_count'] = None
super(Activation, __self__).__init__(
'aws:ssm/activation:Activation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
activation_code: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
expiration_date: Optional[pulumi.Input[str]] = None,
expired: Optional[pulumi.Input[bool]] = None,
iam_role: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
registration_count: Optional[pulumi.Input[int]] = None,
registration_limit: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Activation':
"""
Get an existing Activation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] activation_code: The code the system generates when it processes the activation.
:param pulumi.Input[str] description: The description of the resource that you want to register.
:param pulumi.Input[str] expiration_date: UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[bool] expired: If the current activation has expired.
:param pulumi.Input[str] iam_role: The IAM Role to attach to the managed instance.
:param pulumi.Input[str] name: The default name of the registered managed instance.
:param pulumi.Input[int] registration_count: The number of managed instances that are currently registered using this activation.
:param pulumi.Input[int] registration_limit: The maximum number of managed instances you want to register. The default value is 1 instance.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the object.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["activation_code"] = activation_code
__props__["description"] = description
__props__["expiration_date"] = expiration_date
__props__["expired"] = expired
__props__["iam_role"] = iam_role
__props__["name"] = name
__props__["registration_count"] = registration_count
__props__["registration_limit"] = registration_limit
__props__["tags"] = tags
return Activation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activationCode")
def activation_code(self) -> pulumi.Output[str]:
"""
The code the system generates when it processes the activation.
"""
return pulumi.get(self, "activation_code")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the resource that you want to register.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> pulumi.Output[str]:
"""
UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration.
"""
return pulumi.get(self, "expiration_date")
@property
@pulumi.getter
def expired(self) -> pulumi.Output[bool]:
"""
If the current activation has expired.
"""
return pulumi.get(self, "expired")
@property
@pulumi.getter(name="iamRole")
def iam_role(self) -> pulumi.Output[str]:
"""
The IAM Role to attach to the managed instance.
"""
return pulumi.get(self, "iam_role")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The default name of the registered managed instance.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="registrationCount")
def registration_count(self) -> pulumi.Output[int]:
"""
The number of managed instances that are currently registered using this activation.
"""
return pulumi.get(self, "registration_count")
@property
@pulumi.getter(name="registrationLimit")
def registration_limit(self) -> pulumi.Output[Optional[int]]:
"""
The maximum number of managed instances you want to register. The default value is 1 instance.
"""
return pulumi.get(self, "registration_limit")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the object.
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
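# --- Illustrative lookup (editor's addition): importing existing activation state by ID
# inside a Pulumi program; the resource name and ID below are placeholders. ---
# existing = Activation.get('existing-activation',
#                           id='00000000-0000-0000-0000-000000000000')
# pulumi.export('activation_code', existing.activation_code)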
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['Activation']
class Activation(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
expiration_date: Optional[pulumi.Input[str]] = None,
iam_role: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
registration_limit: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Registers an on-premises server or virtual machine with Amazon EC2 so that it can be managed using Run Command.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_role = aws.iam.Role("testRole", assume_role_policy=\"\"\" {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {"Service": "ssm.amazonaws.com"},
"Action": "sts:AssumeRole"
}
}
\"\"\")
test_attach = aws.iam.RolePolicyAttachment("testAttach",
role=test_role.name,
policy_arn="arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore")
foo = aws.ssm.Activation("foo",
description="Test",
iam_role=test_role.id,
registration_limit=5,
opts=ResourceOptions(depends_on=[test_attach]))
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the resource that you want to register.
:param pulumi.Input[str] expiration_date: UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[str] iam_role: The IAM Role to attach to the managed instance.
:param pulumi.Input[str] name: The default name of the registered managed instance.
:param pulumi.Input[int] registration_limit: The maximum number of managed instances you want to register. The default value is 1 instance.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the object.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['expiration_date'] = expiration_date
if iam_role is None:
raise TypeError("Missing required property 'iam_role'")
__props__['iam_role'] = iam_role
__props__['name'] = name
__props__['registration_limit'] = registration_limit
__props__['tags'] = tags
__props__['activation_code'] = None
__props__['expired'] = None
__props__['registration_count'] = None
super(Activation, __self__).__init__(
'aws:ssm/activation:Activation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
activation_code: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
expiration_date: Optional[pulumi.Input[str]] = None,
expired: Optional[pulumi.Input[bool]] = None,
iam_role: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
registration_count: Optional[pulumi.Input[int]] = None,
registration_limit: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Activation':
"""
Get an existing Activation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] activation_code: The code the system generates when it processes the activation.
:param pulumi.Input[str] description: The description of the resource that you want to register.
:param pulumi.Input[str] expiration_date: UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[bool] expired: If the current activation has expired.
:param pulumi.Input[str] iam_role: The IAM Role to attach to the managed instance.
:param pulumi.Input[str] name: The default name of the registered managed instance.
:param pulumi.Input[int] registration_count: The number of managed instances that are currently registered using this activation.
:param pulumi.Input[int] registration_limit: The maximum number of managed instances you want to register. The default value is 1 instance.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the object.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["activation_code"] = activation_code
__props__["description"] = description
__props__["expiration_date"] = expiration_date
__props__["expired"] = expired
__props__["iam_role"] = iam_role
__props__["name"] = name
__props__["registration_count"] = registration_count
__props__["registration_limit"] = registration_limit
__props__["tags"] = tags
return Activation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activationCode")
def activation_code(self) -> pulumi.Output[str]:
"""
The code the system generates when it processes the activation.
"""
return pulumi.get(self, "activation_code")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the resource that you want to register.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> pulumi.Output[str]:
"""
UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration.
"""
return pulumi.get(self, "expiration_date")
@property
@pulumi.getter
def expired(self) -> pulumi.Output[bool]:
"""
If the current activation has expired.
"""
return pulumi.get(self, "expired")
@property
@pulumi.getter(name="iamRole")
def iam_role(self) -> pulumi.Output[str]:
"""
The IAM Role to attach to the managed instance.
"""
return pulumi.get(self, "iam_role")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The default name of the registered managed instance.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="registrationCount")
def registration_count(self) -> pulumi.Output[int]:
"""
The number of managed instances that are currently registered using this activation.
"""
return pulumi.get(self, "registration_count")
@property
@pulumi.getter(name="registrationLimit")
def registration_limit(self) -> pulumi.Output[Optional[int]]:
"""
The maximum number of managed instances you want to register. The default value is 1 instance.
"""
return pulumi.get(self, "registration_limit")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the object.
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | en | 0.744399 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** Registers an on-premises server or virtual machine with Amazon EC2 so that it can be managed using Run Command. ## Example Usage ```python import pulumi import pulumi_aws as aws test_role = aws.iam.Role("testRole", assume_role_policy=\"\"\" { "Version": "2012-10-17", "Statement": { "Effect": "Allow", "Principal": {"Service": "ssm.amazonaws.com"}, "Action": "sts:AssumeRole" } } \"\"\") test_attach = aws.iam.RolePolicyAttachment("testAttach", role=test_role.name, policy_arn="arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore") foo = aws.ssm.Activation("foo", description="Test", iam_role=test_role.id, registration_limit=5, opts=ResourceOptions(depends_on=[test_attach])) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: The description of the resource that you want to register. :param pulumi.Input[str] expiration_date: UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration. :param pulumi.Input[str] iam_role: The IAM Role to attach to the managed instance. :param pulumi.Input[str] name: The default name of the registered managed instance. :param pulumi.Input[int] registration_limit: The maximum number of managed instances you want to register. The default value is 1 instance. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the object. Get an existing Activation resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] activation_code: The code the system generates when it processes the activation. :param pulumi.Input[str] description: The description of the resource that you want to register. :param pulumi.Input[str] expiration_date: UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration. :param pulumi.Input[bool] expired: If the current activation has expired. :param pulumi.Input[str] iam_role: The IAM Role to attach to the managed instance. :param pulumi.Input[str] name: The default name of the registered managed instance. :param pulumi.Input[int] registration_count: The number of managed instances that are currently registered using this activation. :param pulumi.Input[int] registration_limit: The maximum number of managed instances you want to register. The default value is 1 instance. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the object. The code the system generates when it processes the activation. The description of the resource that you want to register. 
UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. This provider will only perform drift detection of its value when present in a configuration. If the current activation has expired. The IAM Role to attach to the managed instance. The default name of the registered managed instance. The number of managed instances that are currently registered using this activation. The maximum number of managed instances you want to register. The default value is 1 instance. A map of tags to assign to the object. | 1.704631 | 2 |
scripts/release_issue_status/main.py | vincenttran-msft/azure-sdk-for-python | 1 | 6630358 | import time
import os
import re
from datetime import date, datetime
import subprocess as sp
import traceback
import logging
from github import Github
from reply_generator import AUTO_ASK_FOR_CHECK, begin_reply_generate
from utils import update_issue_body, get_readme_and_output_folder, \
get_python_pipelines, get_pipeline_url, auto_close_issue
_NULL = ' '
_FILE_OUT = 'release_issue_status.csv'
_FILE_OUT_PYTHON = 'release_python_status.md'
_PYTHON_SDK_ADMINISTRATORS = ['msyyc', 'BigCat20196', 'azure-sdk']
_PYTHON_SDK_ASSIGNEES = ['BigCat20196', 'msyyc']
_ASSIGNER_DICT = {'BigCat20196': os.getenv('JF_TOKEN'),
'msyyc': os.getenv('TOKEN')}
logging.basicConfig(level=logging.INFO,
format='[auto-reply log] - %(funcName)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def my_print(cmd):
print('==' + cmd + ' ==\n')
def print_check(cmd):
my_print(cmd)
sp.check_call(cmd, shell=True)
def output_python_md(issue_status_python):
with open(_FILE_OUT_PYTHON, 'w') as file_out:
file_out.write(
'| issue | author | package | assignee | bot advice | created date of issue | target release date | date from target |\n')
file_out.write('| ------ | ------ | ------ | ------ | ------ | ------ | ------ | :-----: |\n')
file_out.writelines([item.output_python() for item in sorted(issue_status_python, key=_key_select)])
def output_csv(issue_status):
with open(_FILE_OUT, 'w') as file_out:
file_out.write('language,issue,author,package,created date,delay from created date,latest update time,'
'delay from latest update,status,bot advice\n')
file_out.writelines([item.output() for item in sorted(issue_status, key=_key_select)])
class IssueStatus:
link = _NULL
author = _NULL
package = _NULL
create_date = 0.0
delay_from_create_date = 0
latest_update = 0.0
delay_from_latest_update = 0
status = 'confirm'
bot_advice = _NULL
comment_num = 0
language = _NULL
author_latest_comment = _NULL
whether_author_comment = True
issue_object = _NULL
labels = _NULL
assignee = _NULL
target_date = _NULL
days_from_target = _NULL
def output(self):
return '{},{},{},{},{},{},{},{},{},{}\n'.format(self.language, self.link, self.author,
self.package,
str(date.fromtimestamp(self.create_date)),
self.delay_from_create_date,
str(date.fromtimestamp(self.latest_update)),
self.delay_from_latest_update,
self.status, self.bot_advice)
def output_python(self):
package = self.package.split('-')[-1]
create_date = str(date.fromtimestamp(self.create_date).strftime('%m-%d'))
target_date = str(datetime.strptime(self.target_date, "%Y-%m-%d").strftime('%m-%d'))
if abs(self.days_from_target) < 3:
days_from_target = str(self.days_from_target)
else:
days_from_target = ' '
return '| [#{}]({}) | {} | {} | {} | {} | {} | {} | {} |\n'.format(self.link.split('/')[-1], self.link,
self.author,
package, self.assignee, self.bot_advice,
create_date,
target_date,
days_from_target
)
def _extract(str_list, key_word):
for item in str_list:
if re.fullmatch(key_word, item):
return item.strip()
return _NULL
def _time_format_transform(time_gmt):
return str(datetime.strptime(time_gmt, '%a, %d %b %Y %H:%M:%S GMT'))
def _judge_status(labels):
for label in labels:
if label.name == 'release':
return 'release'
return 'confirm'
def _extract_language(labels):
language = {'Python', 'JS', 'Go', 'Java', 'Ruby'}
label_set = {label.name for label in labels}
intersect = language.intersection(label_set)
return _NULL if not intersect else intersect.pop()
def _key_select(item):
return item.package
def _extract_author_latest_comment(comments):
q = [(comment.updated_at.timestamp(), comment.user.login) for comment in comments]
q.sort()
return _NULL if not q else q[-1][1]
def _whether_author_comment(comments):
q = set(comment.user.login for comment in comments)
diff = q.difference(_PYTHON_SDK_ADMINISTRATORS)
return len(diff) > 0
def _latest_comment_time(comments, delay_from_create_date):
q = [(comment.updated_at.timestamp(), comment.user.login)
for comment in comments if comment.user.login not in _PYTHON_SDK_ADMINISTRATORS]
q.sort()
return delay_from_create_date if not q else int((time.time() - q[-1][0]) / 3600 / 24)
def auto_reply(item, request_repo, rest_repo, duplicated_issue, python_piplines, assigner_repoes):
logging.info("new issue number: {}".format(item.issue_object.number))
assigner_repo = assigner_repoes[item.assignee]
if 'auto-link' not in item.labels:
item.issue_object.add_to_labels('auto-link')
try:
package_name, readme_link, output_folder = update_issue_body(assigner_repo, rest_repo, item.issue_object.number)
logging.info("pkname, readme", package_name, readme_link)
item.package = package_name
key = ('Python', item.package)
duplicated_issue[key] = duplicated_issue.get(key, 0) + 1
except Exception as e:
item.bot_advice = 'failed to modify the body of the new issue. Please modify manually'
item.issue_object.add_to_labels('attention')
logging.info(e)
raise
else:
try:
readme_link, output_folder = get_readme_and_output_folder(request_repo, rest_repo, item.issue_object.number)
except Exception as e:
logging.info('Issue: {} get pkname and output folder failed'.format(item.issue_object.number))
item.bot_advice = 'failed to find Readme link and output folder! <br>'
item.issue_object.add_to_labels('attention')
logging.info(e)
raise
try:
pipeline_url = get_pipeline_url(python_piplines, output_folder)
begin_reply_generate(item=item, rest_repo=rest_repo, readme_link=readme_link, pipeline_url=pipeline_url)
if 'Configured' in item.labels:
item.issue_object.remove_from_labels('Configured')
except Exception as e:
item.bot_advice = 'auto reply failed! <br>'
logging.info('Error from auto reply')
logging.info('Issue:{}'.format(item.issue_object.number))
logging.info(traceback.format_exc())
def main():
# get latest issue status
g = Github(os.getenv('TOKEN')) # please fill user_token
assigner_repoes = {}
for k, v in _ASSIGNER_DICT.items():
assigner_repoes[k] = Github(v).get_repo('Azure/sdk-release-request')
request_repo = g.get_repo('Azure/sdk-release-request')
rest_repo = g.get_repo('Azure/azure-rest-api-specs')
sdk_repo = g.get_repo('Azure/azure-sdk-for-python')
label1 = request_repo.get_label('ManagementPlane')
label2 = request_repo.get_label('Python')
open_issues = request_repo.get_issues(state='open', labels=[label1, label2])
issue_status = []
issue_status_python = []
duplicated_issue = dict()
start_time = time.time()
# get pipeline definitionid
python_piplines = get_python_pipelines()
for item in open_issues:
if not item.number:
continue
issue = IssueStatus()
issue.link = f'https://github.com/Azure/sdk-release-request/issues/{item.number}'
issue.author = item.user.login
issue.package = _extract(item.body.split('\n'), 'azure-.*')
try:
issue.target_date = [x.split(':')[-1].strip() for x in item.body.split('\n') if 'Target release date' in x][0]
issue.days_from_target = int(
(time.mktime(time.strptime(issue.target_date, '%Y-%m-%d')) - time.time()) / 3600 / 24)
except Exception:
issue.target_date = 'fail to get.'
issue.days_from_target = 1000 # make a ridiculous data to remind failure when error happens
issue.create_date = item.created_at.timestamp()
issue.delay_from_create_date = int((time.time() - item.created_at.timestamp()) / 3600 / 24)
issue.latest_update = item.updated_at.timestamp()
issue.delay_from_latest_update = int((time.time() - item.updated_at.timestamp()) / 3600 / 24)
issue.status = _judge_status(item.labels)
issue.comment_num = item.comments
issue.language = _extract_language(item.labels)
issue.author_latest_comment = _extract_author_latest_comment(item.get_comments())
issue.whether_author_comment = _whether_author_comment(item.get_comments())
issue.issue_object = item
issue.labels = [label.name for label in item.labels]
issue.days_from_latest_comment = _latest_comment_time(item.get_comments(), issue.delay_from_create_date)
if item.assignee:
issue.assignee = item.assignee.login
issue_status.append(issue)
key = (issue.language, issue.package)
duplicated_issue[key] = duplicated_issue.get(key, 0) + 1
my_print('Have cost {} seconds'.format(int(time.time() - start_time)))
# rule1: if status is 'release', need to release asap
# rule2: if latest comment is from author, need response asap
# rule3: if comment num is 0, it is new issue, better to deal with it asap
# rule4: if delay from latest update is over 7 days, better to deal with it soon.
# rule5: if delay from created date is over 30 days, better to close.
# rule6: if delay from created date is over 30 days and owner never reply, close it.
# rule7: if delay from created date is over 15 days and owner never reply, remind owner to handle it.
for item in issue_status:
if item.language == 'Python':
assigner_repo = assigner_repoes[item.assignee]
item.issue_object = assigner_repo.get_issue(number=item.issue_object.number)
issue_status_python.append(item)
if item.status == 'release':
item.bot_advice = 'better to release asap.'
elif (item.comment_num == 0 or 'Configured' in item.labels) and 'Python' in item.labels:
item.bot_advice = 'new issue ! <br>'
if 'assigned' not in item.labels:
time.sleep(0.1)
assign_count = int(str(time.time())[-1]) % len(_PYTHON_SDK_ASSIGNEES)
item.issue_object.remove_from_assignees(item.assignee)
item.issue_object.add_to_assignees(_PYTHON_SDK_ASSIGNEES[assign_count])
item.assignee = item.issue_object.assignee.login
item.issue_object.add_to_labels('assigned')
if AUTO_ASK_FOR_CHECK not in item.labels:
try:
auto_reply(item, request_repo, rest_repo, duplicated_issue, python_piplines, assigner_repoes)
except Exception as e:
continue
elif item.author_latest_comment not in _PYTHON_SDK_ADMINISTRATORS:
item.bot_advice = 'new comment. <br>'
if item.comment_num > 1 and item.language == 'Python':
try:
auto_close_issue(request_repo, item)
except Exception as e:
item.bot_advice = 'auto-close failed, please check!'
logging.info(f"=====issue: {item.issue_object.number}, {e}")
if 'base-branch-attention' in item.labels:
item.bot_advice = 'new version is 0.0.0, please check base branch! ' + item.bot_advice
if abs(item.days_from_target) < 3:
item.bot_advice += ' release date < 2 ! <br>'
if item.days_from_latest_comment >= 15 and item.language == 'Python' and '7days attention' in item.labels and item.days_from_target < 0:
item.issue_object.create_comment(
f'hi @{item.author}, the issue is closed since there is no reply for a long time. Please reopen it if necessary or create new one.')
item.issue_object.edit(state='close')
elif item.days_from_latest_comment >= 7 and item.language == 'Python' and '7days attention' not in item.labels and item.days_from_target < 7:
item.issue_object.create_comment(
f'hi @{item.author}, this release-request has been delayed more than 7 days,'
' please deal with it ASAP. We will close the issue if there is still no response after 7 days!')
item.issue_object.add_to_labels('7days attention')
# judge whether there is duplicated issue for same package
if item.package != _NULL and duplicated_issue.get((item.language, item.package)) > 1:
item.bot_advice = f'duplicated issue <br>' + item.bot_advice
# output result
output_python_md(issue_status_python)
output_csv(issue_status)
# commit to github
print_check('git add .')
print_check('git commit -m \"update excel\"')
print_check('git push -f origin HEAD')
if __name__ == '__main__':
main()
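# --- Editor's note: hedged usage sketch, not part of the original script ---
# The checker above reads its GitHub credentials from environment variables
# (os.getenv('TOKEN'), and os.getenv('JF_TOKEN') via _ASSIGNER_DICT), so a
# local run could look roughly like the line below; the token values and the
# script filename are placeholders, not taken from the original repository.
#
#   TOKEN=<github-token> JF_TOKEN=<github-token> python <this_script>.py
#
# main() then writes release_issue_status.csv / release_python_status.md and
# commits them with the git commands at the end of the function.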
| en | 0.907981 | #{}]({}) | {} | {} | {} | {} | {} | {} | {} |\n'.format(self.link.split('/')[-1], self.link, # get latest issue status # please fill user_token # get pipeline definitionid # make a ridiculous data to remind failure when error happens # rule1: if status is 'release', need to release asap # rule2: if latest comment is from author, need response asap # rule3: if comment num is 0, it is new issue, better to deal with it asap # rule4: if delay from latest update is over 7 days, better to deal with it soon. # rule5: if delay from created date is over 30 days, better to close. # rule6: if delay from created date is over 30 days and owner never reply, close it. # rule7: if delay from created date is over 15 days and owner never reply, remind owner to handle it. # judge whether there is duplicated issue for same package # output result # commit to github | 2.319818 | 2 |
examples/01_check_connectivity.py | cyb3rdog/escapepod_python_sdk | 2 | 6630359 | #!/usr/bin/env python3
# Copyright (c) 2021 cyb3rdog
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic Connection Check Example script
Check the connectivity to the EscapePod Extension Proxy with this simple SDK script.
If you run into issues connecting to Cyb3rVector's EscapePod Extension Proxy,
please check the GitHub deployment guide: https://github.com/cyb3rdog/escapepod_python_sdk
and make sure that Vector, the EscapePod and the Extension Proxy are all on the same network
and can reach each other.
--- ---------------------------------------------------------------------- ---
--- In order for the EscapePod itself to be able to connect to this proxy, ---
--- and push its events here, it needs to know where this proxy is hosted. ---
--- Set the following variables in the /etc/escape-pod.conf file accordingly:  ---
--- ---
--- ENABLE_EXTENSIONS=true ---
--- ESCAPEPOD_EXTENDER_TARGET=XX.XX.XX.XX:8089 ---
--- ESCAPEPOD_EXTENDER_DISABLE_TLS=true ---
--- ---------------------------------------------------------------------- ---
"""
import escapepod_sdk
def main():
# Replace the "XX.XX.XX.XX" with the IP address where the EscapePod extension proxy is deployed.
# By default it is usually deployed on the EscapePod itself, so it is the same as your EscapePod IP.
with escapepod_sdk.extension.Client("XX.XX.XX.XX") as client:
client.wait_for_eventstream()
if client.events.subscribed:
print("GREAT! EscapePod extension proxy is connected and ready!")
else:
print("ERROR: Something went wrong! Event stream not connected!")
client.disconnect()
if __name__ == "__main__":
main()
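# --- Editor's note: hedged variant, not part of the original example ---
# The same connectivity check, but reading the proxy address from an
# environment variable instead of hard-coding it. ESCAPEPOD_PROXY_IP is an
# assumed variable name; only the Client API already used above is relied on.
import os

def main_from_env():
    proxy_ip = os.environ.get("ESCAPEPOD_PROXY_IP", "XX.XX.XX.XX")
    with escapepod_sdk.extension.Client(proxy_ip) as client:
        client.wait_for_eventstream()
        if client.events.subscribed:
            print("GREAT! EscapePod extension proxy is connected and ready!")
        else:
            print("ERROR: Something went wrong! Event stream not connected!")
        client.disconnect()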
| en | 0.849727 | #!/usr/bin/env python3 # Copyright (c) 2021 cyb3rdog # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the file LICENSE.txt or at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Basic Connectiion Check Example script Check the connectivity to the EscapePod Extension Proxy with this simple SDK script. In case you'll face issues with connecting to Cyb3rVector's EscapePod Extension Proxy, please check the github deployment guide: https://github.com/cyb3rdog/escapepod_python_sdk and make sure all - Vector, EscapePod and Extension Proxy are on the same network and can reach other --- ---------------------------------------------------------------------- --- --- In order for the EscapePod itself to be able to connect to this proxy, --- --- and push its events here, it needs to know where this proxy is hosted. --- --- Set following variables in /etc/escape-pod.conf file corresondingly: --- --- --- --- ENABLE_EXTENSIONS=true --- --- ESCAPEPOD_EXTENDER_TARGET=XX.XX.XX.XX:8089 --- --- ESCAPEPOD_EXTENDER_DISABLE_TLS=true --- --- ---------------------------------------------------------------------- --- # Replace the "XX.XX.XX.XX" with and ip where the escapepod extension proxy is deployed. # By default its usually deployed on the escapepod itself, so its same as your escapepod ip. | 1.273838 | 1 |
hardware/Tests/send_commands.py | unball/ieee-very-small | 5 | 6630360 |
import serial
import struct
s = serial.Serial('/dev/cu.usbserial-A600e0ti', baudrate=19200)
while True:
data_to_write = struct.pack('=B', input('>> '))
print 'Data:', data_to_write
s.write(data_to_write)
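# --- Editor's note: hedged Python 3 sketch, not part of the original file ---
# The loop above is Python 2 (print statement, eval-style input()). A rough
# Python 3 equivalent is sketched below; the serial port path is the same
# placeholder used above and the helper name is illustrative only. It reuses
# the serial/struct imports from the top of the file.
def send_commands_py3(port='/dev/cu.usbserial-A600e0ti', baudrate=19200):
    conn = serial.Serial(port, baudrate=baudrate)
    while True:
        value = int(input('>> '))            # expects one unsigned byte, 0-255
        data_to_write = struct.pack('=B', value)
        print('Data:', data_to_write)
        conn.write(data_to_write)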
| none | 1 | 2.584654 | 3 |
|
climateeconomics/tests/l1_test_gradient_carboncycle_discipline.py | os-climate/witness-core | 1 | 6630361 |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import unittest
import numpy as np
import pandas as pd
from os.path import join, dirname
from pandas import DataFrame, read_csv
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from sos_trades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest
class CarboncycleJacobianDiscTest(AbstractJacobianUnittest):
# AbstractJacobianUnittest.DUMP_JACOBIAN = True
def setUp(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
def analytic_grad_entry(self):
return [
self.test_execute,
self.test_execute_2
]
def test_execute(self):
self.model_name = 'carboncycle'
ns_dict = {'ns_witness': f'{self.name}',
'ns_ref': f'{self.name}',
'ns_public': f'{self.name}'}
self.ee.ns_manager.add_ns_def(ns_dict)
mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_witness.carboncycle.carboncycle_discipline.CarbonCycleDiscipline'
builder = self.ee.factory.get_builder_from_module(
self.model_name, mod_path)
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
self.ee.display_treeview_nodes()
data_dir = join(dirname(__file__), 'data')
emission_df_all = read_csv(
join(data_dir, 'co2_emissions_onestep.csv'))
emission_df_y = emission_df_all[emission_df_all['years'] >= 2020][['years',
'total_emissions', 'cum_total_emissions']]
# put manually the index
years = np.arange(2020, 2101)
emission_df_y.index = years
values_dict = {f'{self.name}.CO2_emissions_df': emission_df_y}
self.ee.dm.set_values_from_dict(values_dict)
disc_techno = self.ee.root_process.sos_disciplines[0]
self.check_jacobian(location=dirname(__file__), filename=f'jacobian_carbon_cycle_discipline1.pkl',
discipline=disc_techno, step=1e-15, derr_approx='complex_step',
inputs=[f'{self.name}.CO2_emissions_df'],
outputs=[f'{self.name}.carboncycle_df',
f'{self.name}.ppm_objective',
f'{self.name}.rockstrom_limit_constraint',
f'{self.name}.minimum_ppm_constraint'])
def test_execute_2(self):
# test limit for max for lower_ocean_conc / upper_ocean_conc /
# atmo_conc
self.model_name = 'carboncycle'
ns_dict = {'ns_witness': f'{self.name}',
'ns_public': f'{self.name}',
'ns_ref': f'{self.name}'}
self.ee.ns_manager.add_ns_def(ns_dict)
mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_witness.carboncycle.carboncycle_discipline.CarbonCycleDiscipline'
builder = self.ee.factory.get_builder_from_module(
self.model_name, mod_path)
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
self.ee.display_treeview_nodes()
data_dir = join(dirname(__file__), 'data')
emission_df_all = read_csv(
join(data_dir, 'co2_emissions_onestep.csv'))
emission_df_y = emission_df_all[emission_df_all['years'] >= 2020][['years',
'total_emissions', 'cum_total_emissions']]
# put manually the index
years = np.arange(2020, 2101)
emission_df_y.index = years
values_dict = {f'{self.name}.CO2_emissions_df': emission_df_y}
self.ee.dm.set_values_from_dict(values_dict)
disc_techno = self.ee.root_process.sos_disciplines[0]
self.check_jacobian(location=dirname(__file__), filename=f'jacobian_carbon_cycle_discipline2.pkl',
discipline=disc_techno, step=1e-15, derr_approx='complex_step',
inputs=[f'{self.name}.CO2_emissions_df'],
outputs=[f'{self.name}.carboncycle_df',
f'{self.name}.ppm_objective',
f'{self.name}.rockstrom_limit_constraint',
f'{self.name}.minimum_ppm_constraint'])
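# --- Editor's note: hedged run sketch, not part of the original test module ---
# AbstractJacobianUnittest follows the unittest style (setUp + test_* methods),
# so these gradient checks can presumably be run like any other test module,
# e.g. `python -m unittest climateeconomics.tests.l1_test_gradient_carboncycle_discipline`,
# or through a standard entry point:
if __name__ == '__main__':
    unittest.main()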
f'{self.name}.minimum_ppm_constraint']) | en | 0.785716 | Copyright 2022 Airbus SAS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # AbstractJacobianUnittest.DUMP_JACOBIAN = True # put manually the index # test limit for max for lower_ocean_conc / upper_ocean_conc / # atmo_conc # put manually the index | 1.566781 | 2 |
services/buildbot/master/txbuildbot/git.py | graingert/braid | 8 | 6630362 | from twisted.python import log
from twisted.internet import defer
from buildbot.process import buildstep
from buildbot.steps.source import Source
from buildbot.steps.source.git import Git
from buildbot.status.results import SUCCESS
def mungeBranch(branch):
"""
Remove the leading prefix, that comes from svn branches.
"""
if not branch:
return 'trunk'
for cutoff in ['/branches/', 'branches/', '/']:
if branch.startswith(cutoff):
branch = branch[len(cutoff):]
break
return branch
def isTrunk(branch):
"""
Is the branch trunk?
"""
return mungeBranch(branch) == 'trunk'
def isRelease(branch):
"""
Is the branch a release branch?
"""
return mungeBranch(branch).startswith('release-')
def isSafeBranch(branch):
"""
Is this branch a safe branch? That is, is it on a real branch?
"""
return not mungeBranch(branch).startswith('refs/pull')
class MergeForward(Source):
"""
Merge with trunk.
"""
name = 'merge-forward'
description = ['merging', 'forward']
descriptionDone = ['merge', 'forward']
haltOnFailure = True
def __init__(self, repourl, branch='trunk', **kwargs):
self.repourl = repourl
self.branch = branch
kwargs['env'] = {
'GIT_AUTHOR_EMAIL': '<EMAIL>',
'GIT_AUTHOR_NAME': '<NAME>',
'GIT_COMMITTER_EMAIL': '<EMAIL>',
'GIT_COMMITTER_NAME': '<NAME>',
}
Source.__init__(self, **kwargs)
self.addFactoryArguments(repourl=repourl, branch=branch)
def startVC(self, branch, revision, patch):
if not isSafeBranch(branch):
raise ValueError("No building on pull requests.")
self.stdio_log = self.addLog('stdio')
self.step_status.setText(['merging', 'forward'])
d = defer.succeed(None)
if not isTrunk(branch):
d.addCallback(lambda _: self._fetch())
if not (isTrunk(branch) or isRelease(branch)):
d.addCallback(lambda _: self._merge())
if isTrunk(branch):
d.addCallback(lambda _: self._getPreviousVersion())
else:
d.addCallback(lambda _: self._getMergeBase())
d.addCallback(self._setLintVersion)
d.addCallback(lambda _: SUCCESS)
d.addCallbacks(self.finished, self.checkDisconnect)
d.addErrback(self.failed)
def finished(self, results):
if results == SUCCESS:
self.step_status.setText(['merge', 'forward'])
else:
self.step_status.setText(['merge', 'forward', 'failed'])
return Source.finished(self, results)
def _fetch(self):
d = self._dovccmd(['remote', 'set-url', 'origin', self.repourl])
d.addCallback(lambda _: self._dovccmd(['fetch', 'origin']))
return d
def _merge(self):
return self._dovccmd(['merge',
'--no-ff', '--no-stat',
'origin/trunk'])
def _getPreviousVersion(self):
return self._dovccmd(['rev-parse', 'HEAD~1'],
collectStdout=True)
def _getMergeBase(self):
return self._dovccmd(['merge-base', 'HEAD', 'origin/trunk'],
collectStdout=True)
def _setLintVersion(self, version):
self.setProperty("lint_revision", version.strip(), "merge-forward")
def _dovccmd(self, command, abandonOnFailure=True, collectStdout=False, extra_args={}):
cmd = buildstep.RemoteShellCommand(self.workdir, ['git'] + command,
env=self.env,
logEnviron=self.logEnviron,
collectStdout=collectStdout,
**extra_args)
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
def evaluateCommand(cmd):
if abandonOnFailure and cmd.rc != 0:
log.msg("Source step failed while running command %s" % cmd)
raise buildstep.BuildStepFailed()
if collectStdout:
return cmd.stdout
else:
return cmd.rc
d.addCallback(lambda _: evaluateCommand(cmd))
return d
class TwistedGit(Git):
"""
Temporary support for the transitionary stage between SVN and Git.
"""
def startVC(self, branch, revision, patch):
if not isSafeBranch(branch):
raise ValueError("No building on pull requests.")
return Git.startVC(self, branch, revision, patch)
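# --- Editor's note: hedged behaviour notes, not part of the original module ---
# Expected results of the branch helpers above, derived only from their code:
#
#   mungeBranch(None) -> 'trunk'
#   mungeBranch('/branches/release-15.4-8306') -> 'release-15.4-8306'
#   isTrunk('branches/trunk') -> True
#   isRelease('/branches/release-15.4-8306') -> True
#   isSafeBranch('refs/pull/123/merge') -> False   (MergeForward/TwistedGit refuse to build these)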
| en | 0.943484 | Remove the leading prefix, that comes from svn branches. Is the branch trunk? Is the branch a release branch? Is this branch a safe branch? That is, is it on a real branch? Merge with trunk. Temporary support for the transitionary stage between SVN and Git. | 2.245015 | 2 |
selfdrive/mapd/geo_util.py | openpilotkr/hikee9123 | 2 | 6630363 |
import numbers
import math
class GeoUtil:
@staticmethod
def degree2radius(degree):
return degree * (math.pi/180)
@staticmethod
def get_harversion_distance(x1,y1, x2,y2, round_decimal_digits=5):
"""
Return the distance between the points (x1, y1) and (x2, y2) given as lon/lat degrees.
Uses the haversine formula to compute the distance between the two coordinates (unit: km).
"""
if x1 is None or y1 is None or x2 is None or y2 is None:
return None
assert isinstance(x1, numbers.Number) and -180 <= x1 and x1 <= 180
assert isinstance(y1, numbers.Number) and -90 <= y1 and y1 <= 90
assert isinstance(x2, numbers.Number) and -180 <= x2 and x2 <= 180
assert isinstance(y2, numbers.Number) and -90 <= y2 and y2<= 90
R = 6371 # Earth's radius (unit: km)
dLon = GeoUtil.degree2radius(x2-x1)
dLat = GeoUtil.degree2radius(y2-y1)
a = math.sin(dLat/2) * math.sin(dLat/2) \
+ (math.cos(GeoUtil.degree2radius(y1)) \
*math.cos(GeoUtil.degree2radius(y2)) \
*math.sin(dLon/2) * math.sin(dLon/2))
b = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
return round( R * b, round_decimal_digits)
@staticmethod
def get_euclidean_distance(x1,y1, x2,y2, round_decimal_digits=5):
"""
Return the distance between (x1, y1) and (x2, y2) using the Euclidean formula (in degrees).
"""
if x1 is None or y1 is None or x2 is None or y2 is None:
return None
assert isinstance(x1, numbers.Number) and -180 <= x1 and x1 <= 180
assert isinstance(y1, numbers.Number) and -90 <= y1 and y1 <= 90
assert isinstance(x2, numbers.Number) and -180 <= x2 and x2 <= 180
assert isinstance(y2, numbers.Number) and -90 <= y2 and y2<= 90
dLon = abs(x2-x1) # longitude difference
if dLon >= 180:
dLon -= 360
dLat = y2-y1 # latitude difference
return round( math.sqrt(pow(dLon,2) + pow(dLat,2)), round_decimal_digits)
camera_pos = {
(35.139425,128.820543):(190,50),
(35.327421,129.046001):(180,50),
(35.266681,128.750388):(90,50)
}
b = camera_pos[35.139425,128.820543] |
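# --- Editor's note: hedged usage sketch, not part of the original module ---
# The camera_pos keys above are (latitude, longitude) pairs, while both helpers
# take x = longitude first and y = latitude second (see the range asserts).
# A distance between two of the cameras could therefore be computed like this;
# the variable names are illustrative only.
lat1, lon1 = 35.139425, 128.820543
lat2, lon2 = 35.327421, 129.046001
dist_km = GeoUtil.get_harversion_distance(lon1, lat1, lon2, lat2)   # haversine distance in km
dist_deg = GeoUtil.get_euclidean_distance(lon1, lat1, lon2, lat2)   # plain distance in degrees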
b = camera_pos[35.139425,128.820543] | ko | 0.999837 | 경위도 (x1,y1)과 (x2,y2) 점의 거리를 반환 harversion formula 이용하여 2개의 경위도간 거리를 구함(단위:km) # 지구의 반경(단위: km) 유클리안 formula 이용하여 (x1,y1)과 (x2,y2)점의 거리를 반환 # 경도 차이 # 위도 차이 | 3.281254 | 3 |
pyNastran/op2/tables/ogs_grid_point_stresses/ogs_surface_stresses.py | ACea15/pyNastran | 1 | 6630364 |
import warnings
from typing import List
import numpy as np
from pyNastran.op2.result_objects.op2_objects import ScalarObject, get_times_dtype
from pyNastran.f06.f06_formatting import (
write_floats_10e, _eigenvalue_header)
class GridPointSurfaceArray(ScalarObject):
"""
' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E 5\n',
'0 SURFACE X-AXIS X NORMAL(Z-AXIS) Z REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID 0\n',
' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n',
' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\n']
'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'
' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'
' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
self.node_element = None
self.location = None
self._times = None
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
if self.is_built:
return
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self.node_element = np.zeros((self.ntotal, 2), dtype=idtype)
#[nx, ny, txy, angle, majorP, minorP, tmax, ovm]
self.data = np.zeros((self.ntimes, self.nelements, 8), dtype=fdtype)
self.location = np.empty(self.ntotal, dtype='U8')
self._times = np.zeros(self.ntimes, dtype=dtype)
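    # --- Editor's note, not part of the original source: the sample F06 block in
    # the class docstring above corresponds to the arrays allocated here and
    # filled in add_sort1(): data[itime, irow, :] holds
    # [nx, ny, txy, angle, majorP, minorP, tmax, ovm] for the
    # (node id, element id) pair in node_element[irow, :], while
    # location[irow] stores the FIBRE label of that row.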
def _write_table_3(self, op2_file, op2_ascii, new_result, itable, itime): #, itable=-3, itime=0):
import inspect
from struct import pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_table_3: %s\n' % (self.__class__.__name__, call_frame[1][3]))
#if itable == -3:
#print('*writing itable=%s' % itable)
if new_result and itable != -3:
header = [
4, 146, 4,
]
else:
header = [
4, itable, 4,
4, 1, 4,
4, 0, 4,
4, 146, 4,
]
op2_file.write(pack(b'%ii' % len(header), *header))
op2_ascii.write('table_3_header = %s\n' % header)
#op2_file.write(pack('12i', *header))
#else:
#print('***writing itable=%s' % itable)
#op2_file.write(pack('3i', *[
##4, itable, 4,
##4, 1, 4,
##4, 0, 4,
#4, 146, 4,
#]))
approach_code = self.approach_code
table_code = self.table_code
isubcase = self.isubcase
#[
#'aCode', 'tCode', 'element_type', 'isubcase',
#'???', '???', '???', 'load_set'
#'format_code', 'num_wide', 's_code', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', 'Title', 'subtitle', 'label']
#random_code = self.random_code
ogs = self.ogs
if ogs is None:
#print(''.join(self.get_stats()))
warnings.warn('ogs=0...')
ogs = 0
format_code = self.format_code
s_code = self.sCode
num_wide = self.num_wide
acoustic_flag = 0
thermal = 0
title = b'%-128s' % self.title.encode('ascii')
subtitle = b'%-128s' % self.subtitle.encode('ascii')
label = b'%-128s' % self.label.encode('ascii')
ftable3 = b'50i 128s 128s 128s'
unused_oCode = 0
ftable3 = b'i' * 50 + b'128s 128s 128s'
field6 = 0
field7 = 0
if self.analysis_code == 1:
field5 = self.lsdvmns[itime]
if np.isnan(field5): # poor sort2 -> sort1
raise RuntimeError('field5 in a static case is nan...; do you have SORT2?')
#field5 = 1
elif self.analysis_code == 2:
field5 = self.modes[itime]
field6 = self.eigns[itime]
field7 = self.cycles[itime]
assert isinstance(field6, float), type(field6)
assert isinstance(field7, float), type(field7)
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
#elif self.analysis_code == 3:
#field5 = self.freqs[itime]
elif self.analysis_code == 5:
field5 = self.freqs[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 6:
field5 = self.dts[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 7: # pre-buckling
field5 = self.lsdvmns[itime] # load set number
elif self.analysis_code == 8: # post-buckling
field5 = self.lsdvmns[itime] # load set number
#if hasattr(self, 'eigns'):
if hasattr(self, 'eigns'): # matches self.eigns used on the next line
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find eigns or eigrs on analysis_code=8')
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
elif self.analysis_code == 9: # complex eigenvalues
field5 = self.modes[itime]
if hasattr(self, 'eigns'):
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find eigns or eigrs on analysis_code=9')
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
field7 = self.eigis[itime]
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
elif self.analysis_code == 10: # nonlinear statics
field5 = self.lftsfqs[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5; load step
elif self.analysis_code == 11: # old geometric nonlinear statics
field5 = self.lsdvmns[itime] # load set number
else:
raise NotImplementedError(self.analysis_code)
#self.ogs = self.add_data_parameter(data, 'ogs_id', b'i', 3, False)
#self.refid = self.add_data_parameter(data, 'refid', b'i', 8, False)
#self.format_code = self.add_data_parameter(data, 'format_code', b'i', 9, False)
#self.num_wide = self.add_data_parameter(data, 'num_wide', b'i', 10, False)
#self.sCode = self.add_data_parameter(data, 'sCode', b'i', 11, False)
#self.oCoord = self.add_data_parameter(data, 'oCoord', b'i', 12, False)
#self.axis = self.add_data_parameter(data, 'axis', b'i', 13, False)
#self.normal = self.add_data_parameter(data, 'normal', b'i', 14, False)
table3 = [
approach_code, table_code, ogs, isubcase, field5,
field6, field7, self.refid, format_code, num_wide,
s_code, self.oCoord, self.axis, self.normal, 0,
0, 0, 0, 0, 0,
0, 0, thermal, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
title, subtitle, label,
]
assert table3[22] == thermal
n = 0
for i, v in enumerate(table3):
#print('write_table_3', i, v)
if isinstance(v, (int, float, np.int32, np.float32)):
n += 4
elif isinstance(v, str):
n += len(v)
else:
n += len(v)
assert n == 584, n
data = [584] + table3 + [584]
fmt = b'i' + ftable3 + b'i'
#print(fmt)
#print(data)
#f.write(pack(fascii, '%s header 3c' % self.table_name, fmt, data))
op2_ascii.write('%s header 3c = %s\n' % (self.table_name, data))
op2_file.write(pack(fmt, *data))
#def build_dataframe(self):
#"""creates a pandas dataframe"""
#import pandas as pd
#headers = self.get_headers()
#element_node = [self.element_node[:, 0], self.element_node[:, 1]]
#if self.nonlinear_factor not in (None, np.nan):
#column_names, column_values = self._build_dataframe_transient_header()
#self.data_frame = pd.Panel(self.data, items=column_values, major_axis=element_node, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#else:
#self.data_frame = pd.Panel(self.data, major_axis=element_node, minor_axis=headers).to_frame()
#self.data_frame.columns.names = ['Static']
#self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
def add_sort1(self, dt, nid, eid, fiber, nx, ny, txy, angle, majorP, minorP, tmax, ovm):
"""unvectorized method for adding SORT1 transient data"""
#assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.node_element[self.itotal, :] = [nid, eid]
self.location[self.itotal] = fiber
self.data[self.itime, self.itotal, :] = [nx, ny, txy, angle, majorP, minorP, tmax, ovm]
self.itotal += 1
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node_element.shape = {self.node_element.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
if header is None:
header = []
cid = self.refid
axis_int = self.oCoord
axis_map = {0 : 'X', 1 : 'Y', 2 : 'Z'}
axis = axis_map[axis_int]
msg = self._get_f06_message(self.ogs_id, cid, axis)
ntimes = self.data.shape[0]
nids = self.node_element[:, 0]
eids = self.node_element[:, 1]
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
nx = self.data[itime, :, 0]
ny = self.data[itime, :, 1]
txy = self.data[itime, :, 2]
angle = self.data[itime, :, 3]
majorp = self.data[itime, :, 4]
minorp = self.data[itime, :, 5]
tmax = self.data[itime, :, 6]
ovm = self.data[itime, :, 7]
fibers = self.location
nid_old = -1
for (nid, eid, fiber, nxi, nyi, txyi, anglei, majorpi, minorpi, tmaxi, ovmi) in zip(
nids, eids, fibers, nx, ny, txy, angle, majorp, minorp, tmax, ovm):
[nxi, nyi, txyi, majorpi, minorpi, tmaxi, ovmi] = write_floats_10e([
nxi, nyi, txyi, majorpi, minorpi, tmaxi, ovmi])
if nid > nid_old:
f06_file.write(
'0%8s %8s %4s %-10s %-10s %-10s %8.4f %10s %10s %10s %s\n' % (
nid, eid, fiber, nxi, nyi, txyi, anglei, majorpi, minorpi,
tmaxi, ovmi))
else:
f06_file.write(
' %8s %8s %4s %-10s %-10s %-10s %8.4f %10s %10s %10s %s\n' % (
'', '', fiber, nxi, nyi, txyi, anglei, majorpi, minorpi,
tmaxi, ovmi))
nid_old = nid
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2_file, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write(f'{self.__class__.__name__}.write_op2: {call_frame[1][3]}\n')
if itable == -1:
#print('***************', itable)
self._write_table_header(op2_file, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
#eids2 = self.element_node[:, 0]
#nodes = self.element_node[:, 1]
#nelements_nodes = len(nodes)
#eids3 = self.element_cid[:, 0]
#cids3 = self.element_cid[:, 1]
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
#nelements = len(np.unique(eids2))
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
#nnodes_centroid = self.nnodes_per_element
#nnodes_no_centroid = self.nnodes_per_element_no_centroid
nnodes = self.data.shape[1]
#ntotali = 11
ntotali = self.num_wide
assert ntotali == 11, ntotali
ntotal = ntotali * nnodes
#print('shape = %s' % str(self.data.shape))
#assert nnodes > 1, nnodes
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(f' ntimes = {self.ntimes}\n')
ntimes = self.ntimes
#print('ntotal=%s' % (ntotal))
if not self.is_sort1:
raise NotImplementedError('SORT2')
#op2_format = endian + b'2i6f'
#idtype = self.element_cid.dtype
fdtype = self.data.dtype
#print(self.size)
if self.size == 4:
grid_bytes = b'GRID'
else:
warnings.warn(f'downcasting {self.class_name}...')
idtype = np.int32(1)
fdtype = np.float32(1.0)
grid_bytes = b'GRID'
#[nids, eids, fibers, nx, ny, txy, angle, majorp, minorp, tmax, ovm]
nids = self.node_element[:, 0]
eids = self.node_element[:, 1]
nids_device = nids * 10 + self.device_code
# speed up transient cases, but slightly slows down static cases
data_out = np.empty((nnodes, 11), dtype=fdtype)
# setting:
# - [nid_device, eids, location_bytes]
data_out[:, 0] = nids_device
data_out[:, 1] = eids
location_bytes = np.array([loc.encode('ascii') for loc in self.location])
data_out[:, 2] = location_bytes.view(fdtype)
#nx = self.data[itime, :, 0]
#ny = self.data[itime, :, 1]
#txy = self.data[itime, :, 2]
#angle = self.data[itime, :, 3]
#majorp = self.data[itime, :, 4]
#minorp = self.data[itime, :, 5]
#tmax = self.data[itime, :, 6]
#ovm = self.data[itime, :, 7]
#fibers = self.location
#cen_array = np.full(nelements, grid_bytes, dtype='|S4')
#nnodes_no_centroid_array = np.full(nelements, nnodes_no_centroid, dtype=idtype)
#element_wise_data = to_column_bytes([
#element_device, # ints
#cids3, # ints
#cen_array, # bytes
#nnodes_no_centroid_array, # ints
#], fdtype, debug=False)
# we could tack the nodes on, so we don't have to keep stacking it
# but we run into issues with datai
#
# total=nelements_nodes
#nodes_view = nodes.view(fdtype).reshape(nelements, nnodes_centroid)
#inode = np.arange(nnodes_centroid)
#data_out[:, 4+inode*21] = nodes_view[:, inode]
op2_ascii.write(f'nnodes={nnodes:d}\n')
struct_i = Struct('i')
struct_13i = Struct('13i')
for itime in range(self.ntimes):
self._write_table_3(op2_file, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2_file.write(struct_13i.pack(*header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write(f'r4 [4, {itable:d}, 4]\n')
op2_ascii.write(f'r4 [4, {4 * ntotal:d}, 4]\n')
# stack each output by columns and fix any dtypes
#datai2 = datai.reshape(nelements, 21*nnodes_centroid)
#data_out = np.hstack([element_wise_data, datai2])
#data_out[:, 4:] = datai2
# switch datai to element format and put it in the output buffer
data_out[:, 3:] = self.data[itime, :, :]
assert data_out.size == ntotal
op2_file.write(data_out)
itable -= 1
header = [4 * ntotal,]
op2_file.write(struct_i.pack(*header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for inid, (nid, eid) in enumerate(self.node_element):
t1 = self.data[itime, inid, :]
t2 = table.data[itime, inid, :]
(nx1, ny1, txy1, angle1, majorp1, minorp1, tmax1, ovm1) = t1
(nx2, ny2, txy2, angle2, majorp2, minorp2, tmax2, ovm2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s %s\n (%s, %s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
nid, eid,
nx1, ny1, txy1, angle1, majorp1, minorp1, tmax1, ovm1,
nx2, ny2, txy2, angle2, majorp2, minorp2, tmax2, ovm2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:
raise NotImplementedError()
class GridPointSurfaceStressesArray(GridPointSurfaceArray):
def get_headers(self) -> List[str]:
headers = ['nx', 'ny', 'txy', 'angle', 'majorP', 'minorP', 'tmax', 'ovm']
return headers
def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:
msg = [
f' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\n',
f'0 SURFACE X-AXIS X NORMAL(Z-AXIS) {axis} REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID {cid}\n',
' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n',
' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\n']
#'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'
#' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'
#' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'
return msg
class GridPointSurfaceStrainsArray(GridPointSurfaceArray):
def get_headers(self) -> List[str]:
headers = ['nx', 'ny', 'exy', 'angle', 'majorP', 'minorP', 'emax', 'evm']
return headers
def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:
msg = [
f' S T R A I N S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\n',
#f' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\n',
f'0 SURFACE X-AXIS X NORMAL(Z-AXIS) {axis} REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID {cid}\n',
#' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n',
' GRID ELEMENT STRAINS IN SURFACE SYSTEM PRINCIPAL STRAINS MAX \n',
' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\n']
#'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'
#' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'
#' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'
return msg
class GridPointStressesVolumePrincipalArray(ScalarObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
self._times = None
def get_headers(self) -> List[str]:
headers = [
'lxa', 'lxb', 'lxc',
'lya', 'lyb', 'lyc',
'lza', 'lzb', 'lzc',
'sa', 'sb', 'sc',
'epr', 'ovm']
return headers
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for inid, nid in enumerate(self.node):
t1 = self.data[itime, inid, :]
t2 = table.data[itime, inid, :]
(lxa1, lxb1, lxc1, lya1, lyb1, lyc1, lza1, lzb1, lzc1, sa1, sb1, sc1, epr1, ovm1) = t1
(lxa2, lxb2, lxc2, lya2, lyb2, lyc2, lza2, lzb2, lzc2, sa2, sb2, sc2, epr2, ovm2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s)\n' % (
nid,
lxa1, lxb1, lxc1, lya1, lyb1, lyc1, lza1,
lxa2, lxb2, lxc2, lya2, lyb2, lyc2, lza2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
#print('self.IDs', self.data)
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
self.nelements //= self.ntimes
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self.node = np.zeros(self.ntotal, dtype=idtype)
#lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm
self.data = np.zeros((self.ntimes, self.ntotal, 14), dtype=fdtype)
self.location = np.empty(self.ntotal, dtype='U8')
self._times = np.zeros(self.ntimes, dtype=dtype)
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node.shape = {self.node.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def add_sort1(self, dt, nid, lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm):
assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)
self._times[self.itime] = dt
self.node[self.itotal] = nid
self.data[self.itime, self.itotal, :] = [lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm]
self.itotal += 1
#def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
#page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
#pass
class GridPointStressesVolumeDirectArray(ScalarObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
self._times = None
def get_headers(self) -> List[str]:
headers = ['ox', 'oy', 'oz', 'txy', 'tyz', 'txz', 'pressure', 'ovm']
return headers
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
#print('self.IDs', self.data)
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
self.nelements //= self.ntimes
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self.node = np.zeros(self.ntotal, dtype=idtype)
#oxx, oyy, txy, angle, major, minor, ovm
self.data = np.zeros((self.ntimes, self.ntotal, 8), dtype=fdtype)
self.location = np.empty(self.ntotal, dtype='U8')
self._times = np.zeros(self.ntimes, dtype=dtype)
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node.shape = {self.node.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def add_sort1(self, dt, nid, nx, ny, nz, txy, tyz, txz, pressure, ovm):
assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)
self._times[self.itime] = dt
self.node[self.itotal] = nid
self.data[self.itime, self.itotal, :] = [nx, ny, nz, txy, tyz, txz, pressure, ovm]
self.itotal += 1
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
"""
' D I R E C T S T R E S S E S A T G R I D P O I N T S - - V O L U M E 101'
' OUTPUT COORDINATE SYSTEM = 0 BASIC '
' GRID NORMAL-X NORMAL-Y NORMAL-Z SHEAR-XY SHEAR-YZ SHEAR-ZX MEAN VON MISES'
' ID PRESSURE'
' 1 1.455E+03 -1.548E+02 -2.927E+02 -1.573E+01 3.326E+01 -3.438E+03 -3.357E+02 6.188E+03'
' 2 1.093E+03 -1.996E+02 -1.682E+02 1.542E+02 5.962E+01 -4.104E+03 -2.417E+02 7.227E+03'
"""
if header is None:
header = []
cid = self.refid
#axis_int = self.oCoord
#axis_map = {0 : 'X', 1 : 'Y', 2 : 'Z'}
#axis = axis_map[axis_int]
msg = [
' D I R E C T S T R E S S E S A T G R I D P O I N T S - - V O L U M E %3i\n'
' OUTPUT COORDINATE SYSTEM = %7i ELEMENT \n'
' GRID NORMAL-X NORMAL-Y NORMAL-Z SHEAR-XY SHEAR-YZ SHEAR-ZX MEAN VON MISES\n'
' ID PRESSURE\n' % (
#' 8086 6.136E-02 2.131E-01 8.353E-02 -2.268E+00 -2.274E-13 1.525E-13 -1.193E-01 3.930E+00'
self.ogs_id, cid)
]
ntimes = self.data.shape[0]
nids = self.node
zero = ' '
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
nx = self.data[itime, :, 0]
ny = self.data[itime, :, 1]
nz = self.data[itime, :, 2]
txy = self.data[itime, :, 3]
tyz = self.data[itime, :, 4]
txz = self.data[itime, :, 5]
pressure = self.data[itime, :, 6]
ovm = self.data[itime, :, 7]
for (nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi) in zip(
nids, nx, ny, nz, txy, tyz, txz, pressure, ovm):
[nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi] = write_floats_10e([
nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi])
f06_file.write('%s%8s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-s\n' % (
zero, nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi.rstrip()))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for inid, nid in enumerate(self.node):
t1 = self.data[itime, inid, :]
t2 = table.data[itime, inid, :]
(nx1, ny1, nz1, txy1, tyz1, txz1, pressure1, ovm1) = t1
(nx2, ny2, nz2, txy2, tyz2, txz2, pressure2, ovm2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
nid,
nx1, ny1, nz1, txy1, tyz1, txz1, pressure1, ovm1,
nx2, ny2, nz2, txy2, tyz2, txz2, pressure2, ovm2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
#msg = [
#' P R I N C I P A L G R I D P O I N T S T R E S S D I S C O N T I N U I T I E S - - V O L U M E %s\n'
#' OUTPUT COORDINATE SYSTEM = %7i ELEMENT \n'
#' GRID PRINCIPAL STRESS DISCONTINUITY MEAN VON MISES ERROR\n'
#' ID A B C PRESSURE EST.\n' % (
#ivolume, cid)
#' 8086 5.448E-09 9.886E-08 2.026E-15 2.484E-09 1.086E-07 5.716E-08'
#]
# not sure what result this is for
#zero = ' '
#f06_file.write('%s%8s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-s\n' % (
#zero, nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi.rstrip()))
GridPointStressesVolumeDiscontinutiesArray = None # tCode=34
class GridPointStressesSurfaceDiscontinutiesArray(ScalarObject): # tCode=35
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
#self.node_element = None
self._times = None
def get_headers(self) -> List[str]:
headers = ['oxx', 'oyy', 'ozz', 'txy', 'pressure']
return headers
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
#print('self.IDs', self.data)
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.node = np.zeros(self.ntotal, dtype='int32')
#oxx, oyy, ozz, txy, pressure
self.data = np.zeros((self.ntimes, self.ntotal, 5), dtype='float32')
self.location = np.empty(self.ntotal, dtype='U8')
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self._times = np.zeros(self.ntimes, dtype=dtype)
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node.shape = {self.node.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def add_sort1(self, dt, nid, oxx, oyy, ozz, txy, pressure):
assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)
self._times[self.itime] = dt
self.node[self.itotal] = nid
self.data[self.itime, self.itotal, :] = [oxx, oyy, ozz, txy, pressure]
self.itotal += 1
class GridPointStrainsVolumePrincipalArray(GridPointStressesVolumePrincipalArray):
pass
class GridPointStrainsVolumeDirectArray(GridPointStressesVolumeDirectArray):
pass
GridPointStrainsVolumeDiscontinutiesArray = None
class GridPointStrainsSurfaceDiscontinutiesArray(GridPointStressesSurfaceDiscontinutiesArray):
pass
| import warnings
from typing import List
import numpy as np
from pyNastran.op2.result_objects.op2_objects import ScalarObject, get_times_dtype
from pyNastran.f06.f06_formatting import (
write_floats_10e, _eigenvalue_header)
class GridPointSurfaceArray(ScalarObject):
"""
' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E 5\n',
'0 SURFACE X-AXIS X NORMAL(Z-AXIS) Z REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID 0\n',
' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n',
' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\n']
'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'
' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'
' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
self.node_element = None
self.location = None
self._times = None
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
if self.is_built:
return
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self.node_element = np.zeros((self.ntotal, 2), dtype=idtype)
#oxx, oyy, txy, angle, major, minor, ovm
self.data = np.zeros((self.ntimes, self.nelements, 8), dtype=fdtype)
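# NOTE: `data` is allocated with `nelements` rows here while add_sort1() fills
# rows indexed by `itotal` (up to `ntotal`); for this table the two counts are
# expected to match, but a mismatch would make add_sort1 write out of range.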
self.location = np.empty(self.ntotal, dtype='U8')
self._times = np.zeros(self.ntimes, dtype=dtype)
def _write_table_3(self, op2_file, op2_ascii, new_result, itable, itime): #, itable=-3, itime=0):
import inspect
from struct import pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_table_3: %s\n' % (self.__class__.__name__, call_frame[1][3]))
#if itable == -3:
#print('*writing itable=%s' % itable)
if new_result and itable != -3:
header = [
4, 146, 4,
]
else:
header = [
4, itable, 4,
4, 1, 4,
4, 0, 4,
4, 146, 4,
]
op2_file.write(pack(b'%ii' % len(header), *header))
op2_ascii.write('table_3_header = %s\n' % header)
#op2_file.write(pack('12i', *header))
#else:
#print('***writing itable=%s' % itable)
#op2_file.write(pack('3i', *[
##4, itable, 4,
##4, 1, 4,
##4, 0, 4,
#4, 146, 4,
#]))
approach_code = self.approach_code
table_code = self.table_code
isubcase = self.isubcase
#[
#'aCode', 'tCode', 'element_type', 'isubcase',
#'???', '???', '???', 'load_set'
#'format_code', 'num_wide', 's_code', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', '???', '???', '???',
#'???', 'Title', 'subtitle', 'label']
#random_code = self.random_code
ogs = self.ogs
if ogs is None:
#print(''.join(self.get_stats()))
warnings.warn('ogs=0...')
ogs = 0
format_code = self.format_code
s_code = self.sCode
num_wide = self.num_wide
acoustic_flag = 0
thermal = 0
title = b'%-128s' % self.title.encode('ascii')
subtitle = b'%-128s' % self.subtitle.encode('ascii')
label = b'%-128s' % self.label.encode('ascii')
unused_oCode = 0
ftable3 = b'i' * 50 + b'128s 128s 128s'
field6 = 0
field7 = 0
if self.analysis_code == 1:
field5 = self.lsdvmns[itime]
if np.isnan(field5): # poor sort2 -> sort1
raise RuntimeError('field5 in a static case is nan...; do you have SORT2?')
#field5 = 1
elif self.analysis_code == 2:
field5 = self.modes[itime]
field6 = self.eigns[itime]
field7 = self.cycles[itime]
assert isinstance(field6, float), type(field6)
assert isinstance(field7, float), type(field7)
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
#elif self.analysis_code == 3:
#field5 = self.freqs[itime]
elif self.analysis_code == 5:
field5 = self.freqs[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 6:
field5 = self.dts[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5
elif self.analysis_code == 7: # pre-buckling
field5 = self.lsdvmns[itime] # load set number
elif self.analysis_code == 8: # post-buckling
field5 = self.lsdvmns[itime] # load set number
if hasattr(self, 'eigns'):
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find eigns or eigrs on analysis_code=8')
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
elif self.analysis_code == 9: # complex eigenvalues
field5 = self.modes[itime]
if hasattr(self, 'eigns'):
field6 = self.eigns[itime]
elif hasattr(self, 'eigrs'):
field6 = self.eigrs[itime]
else: # pragma: no cover
print(self.get_stats())
raise NotImplementedError('cant find eigns or eigrs on analysis_code=9')
ftable3 = set_table3_field(ftable3, 6, b'f') # field 6
field7 = self.eigis[itime]
ftable3 = set_table3_field(ftable3, 7, b'f') # field 7
elif self.analysis_code == 10: # nonlinear statics
field5 = self.lftsfqs[itime]
ftable3 = set_table3_field(ftable3, 5, b'f') # field 5; load step
elif self.analysis_code == 11: # old geometric nonlinear statics
field5 = self.lsdvmns[itime] # load set number
else:
raise NotImplementedError(self.analysis_code)
#self.ogs = self.add_data_parameter(data, 'ogs_id', b'i', 3, False)
#self.refid = self.add_data_parameter(data, 'refid', b'i', 8, False)
#self.format_code = self.add_data_parameter(data, 'format_code', b'i', 9, False)
#self.num_wide = self.add_data_parameter(data, 'num_wide', b'i', 10, False)
#self.sCode = self.add_data_parameter(data, 'sCode', b'i', 11, False)
#self.oCoord = self.add_data_parameter(data, 'oCoord', b'i', 12, False)
#self.axis = self.add_data_parameter(data, 'axis', b'i', 13, False)
#self.normal = self.add_data_parameter(data, 'normal', b'i', 14, False)
table3 = [
approach_code, table_code, ogs, isubcase, field5,
field6, field7, self.refid, format_code, num_wide,
s_code, self.oCoord, self.axis, self.normal, 0,
0, 0, 0, 0, 0,
0, 0, thermal, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
title, subtitle, label,
]
assert table3[22] == thermal
n = 0
for i, v in enumerate(table3):
#print('write_table_3', i, v)
if isinstance(v, (int, float, np.int32, np.float32)):
n += 4
elif isinstance(v, str):
n += len(v)
else:
n += len(v)
assert n == 584, n
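# 584 bytes = 50 integer fields * 4 bytes + 3 text fields * 128 bytes
# (200 + 384), matching the b'i'*50 + b'128s 128s 128s' layout packed below.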
data = [584] + table3 + [584]
fmt = b'i' + ftable3 + b'i'
#print(fmt)
#print(data)
#f.write(pack(fascii, '%s header 3c' % self.table_name, fmt, data))
op2_ascii.write('%s header 3c = %s\n' % (self.table_name, data))
op2_file.write(pack(fmt, *data))
#def build_dataframe(self):
#"""creates a pandas dataframe"""
#import pandas as pd
#headers = self.get_headers()
#element_node = [self.element_node[:, 0], self.element_node[:, 1]]
#if self.nonlinear_factor not in (None, np.nan):
#column_names, column_values = self._build_dataframe_transient_header()
#self.data_frame = pd.Panel(self.data, items=column_values, major_axis=element_node, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#else:
#self.data_frame = pd.Panel(self.data, major_axis=element_node, minor_axis=headers).to_frame()
#self.data_frame.columns.names = ['Static']
#self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
def add_sort1(self, dt, nid, eid, fiber, nx, ny, txy, angle, majorP, minorP, tmax, ovm):
"""unvectorized method for adding SORT1 transient data"""
#assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.node_element[self.itotal, :] = [nid, eid]
self.location[self.itotal] = fiber
self.data[self.itime, self.itotal, :] = [nx, ny, txy, angle, majorP, minorP, tmax, ovm]
self.itotal += 1
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node_element.shape = {self.node_element.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
if header is None:
header = []
cid = self.refid
axis_int = self.oCoord
axis_map = {0 : 'X', 1 : 'Y', 2 : 'Z'}
axis = axis_map[axis_int]
msg = self._get_f06_message(self.ogs_id, cid, axis)
ntimes = self.data.shape[0]
nids = self.node_element[:, 0]
eids = self.node_element[:, 1]
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
nx = self.data[itime, :, 0]
ny = self.data[itime, :, 1]
txy = self.data[itime, :, 2]
angle = self.data[itime, :, 3]
majorp = self.data[itime, :, 4]
minorp = self.data[itime, :, 5]
tmax = self.data[itime, :, 6]
ovm = self.data[itime, :, 7]
fibers = self.location
nid_old = -1
for (nid, eid, fiber, nxi, nyi, txyi, anglei, majorpi, minorpi, tmaxi, ovmi) in zip(
nids, eids, fibers, nx, ny, txy, angle, majorp, minorp, tmax, ovm):
[nxi, nyi, txyi, majorpi, minorpi, tmaxi, ovmi] = write_floats_10e([
nxi, nyi, txyi, majorpi, minorpi, tmaxi, ovmi])
if nid > nid_old:
f06_file.write(
'0%8s %8s %4s %-10s %-10s %-10s %8.4f %10s %10s %10s %s\n' % (
nid, eid, fiber, nxi, nyi, txyi, anglei, majorpi, minorpi,
tmaxi, ovmi))
else:
f06_file.write(
' %8s %8s %4s %-10s %-10s %-10s %8.4f %10s %10s %10s %s\n' % (
'', '', fiber, nxi, nyi, txyi, anglei, majorpi, minorpi,
tmaxi, ovmi))
nid_old = nid
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
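# (write_f06 prints a '0'-prefixed row the first time each grid ID appears and
# blank ID columns for its remaining fibre rows, mirroring Nastran's f06 layout.)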
def write_op2(self, op2_file, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write(f'{self.__class__.__name__}.write_op2: {call_frame[1][3]}\n')
if itable == -1:
#print('***************', itable)
self._write_table_header(op2_file, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
#eids2 = self.element_node[:, 0]
#nodes = self.element_node[:, 1]
#nelements_nodes = len(nodes)
#eids3 = self.element_cid[:, 0]
#cids3 = self.element_cid[:, 1]
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
#nelements = len(np.unique(eids2))
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
#nnodes_centroid = self.nnodes_per_element
#nnodes_no_centroid = self.nnodes_per_element_no_centroid
nnodes = self.data.shape[1]
#ntotali = 11
ntotali = self.num_wide
assert ntotali == 11, ntotali
ntotal = ntotali * nnodes
#print('shape = %s' % str(self.data.shape))
#assert nnodes > 1, nnodes
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(f' ntimes = {self.ntimes}\n')
ntimes = self.ntimes
#print('ntotal=%s' % (ntotal))
if not self.is_sort1:
raise NotImplementedError('SORT2')
#op2_format = endian + b'2i6f'
#idtype = self.element_cid.dtype
fdtype = self.data.dtype
#print(self.size)
if self.size == 4:
grid_bytes = b'GRID'
else:
warnings.warn(f'downcasting {self.class_name}...')
idtype = np.int32(1)
fdtype = np.float32(1.0)
grid_bytes = b'GRID'
#[nids, eids, fibers, nx, ny, txy, angle, majorp, minorp, tmax, ovm]
nids = self.node_element[:, 0]
eids = self.node_element[:, 1]
nids_device = nids * 10 + self.device_code
# speed up transient cases, but slightly slows down static cases
data_out = np.empty((nnodes, 11), dtype=fdtype)
# setting:
# - [nid_device, eids, location_bytes]
data_out[:, 0] = nids_device
data_out[:, 1] = eids
location_bytes = np.array([loc.encode('ascii') for loc in self.location])
data_out[:, 2] = location_bytes.view(fdtype)
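# The 4-char location labels are written as raw ASCII bytes viewed as floats
# (see .view(fdtype) above) so the record can be emitted as one homogeneous
# array; the float values produced this way are not meaningful numbers.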
#nx = self.data[itime, :, 0]
#ny = self.data[itime, :, 1]
#txy = self.data[itime, :, 2]
#angle = self.data[itime, :, 3]
#majorp = self.data[itime, :, 4]
#minorp = self.data[itime, :, 5]
#tmax = self.data[itime, :, 6]
#ovm = self.data[itime, :, 7]
#fibers = self.location
#cen_array = np.full(nelements, grid_bytes, dtype='|S4')
#nnodes_no_centroid_array = np.full(nelements, nnodes_no_centroid, dtype=idtype)
#element_wise_data = to_column_bytes([
#element_device, # ints
#cids3, # ints
#cen_array, # bytes
#nnodes_no_centroid_array, # ints
#], fdtype, debug=False)
# we could tack the nodes on, so we don't have to keep stacking it
# but we run into issues with datai
#
# total=nelements_nodes
#nodes_view = nodes.view(fdtype).reshape(nelements, nnodes_centroid)
#inode = np.arange(nnodes_centroid)
#data_out[:, 4+inode*21] = nodes_view[:, inode]
op2_ascii.write(f'nnodes={nnodes:d}\n')
struct_i = Struct('i')
struct_13i = Struct('13i')
for itime in range(self.ntimes):
self._write_table_3(op2_file, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2_file.write(struct_13i.pack(*header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write(f'r4 [4, {itable:d}, 4]\n')
op2_ascii.write(f'r4 [4, {4 * ntotal:d}, 4]\n')
# stack each output by columns and fix any dtypes
#datai2 = datai.reshape(nelements, 21*nnodes_centroid)
#data_out = np.hstack([element_wise_data, datai2])
#data_out[:, 4:] = datai2
# switch datai to element format and put it in the output buffer
data_out[:, 3:] = self.data[itime, :, :]
assert data_out.size == ntotal
op2_file.write(data_out)
itable -= 1
header = [4 * ntotal,]
op2_file.write(struct_i.pack(*header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for inid, (nid, eid) in enumerate(self.node_element):
t1 = self.data[itime, inid, :]
t2 = table.data[itime, inid, :]
(nx1, ny1, txy1, majorp1, minorp1, tmax1, ovm1) = t1
(nx2, ny2, txy2, majorp2, minorp2, tmax2, ovm2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s %s\n (%s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s)\n' % (
nid, eid,
nx1, ny1, txy1, majorp1, minorp1, tmax1, ovm1,
nx2, ny2, txy2, majorp2, minorp2, tmax2, ovm2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:
raise NotImplementedError()
class GridPointSurfaceStressesArray(GridPointSurfaceArray):
def get_headers(self) -> List[str]:
headers = ['nx', 'ny', 'txy', 'angle', 'majorP', 'minorP', 'tmax', 'ovm']
return headers
def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:
msg = [
f' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\n',
f'0 SURFACE X-AXIS X NORMAL(Z-AXIS) {axis} REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID {cid}\n',
' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n',
' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\n']
#'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'
#' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'
#' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'
return msg
class GridPointSurfaceStrainsArray(GridPointSurfaceArray):
def get_headers(self) -> List[str]:
headers = ['nx', 'ny', 'exy', 'angle', 'majorP', 'minorP', 'emax', 'evm']
return headers
def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:
msg = [
f' S T R A I N S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\n',
#f' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\n',
f'0 SURFACE X-AXIS X NORMAL(Z-AXIS) {axis} REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID {cid}\n',
#' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n',
' GRID ELEMENT STRAINS IN SURFACE SYSTEM PRINCIPAL STRAINS MAX \n',
' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\n']
#'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'
#' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'
#' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'
return msg
class GridPointStressesVolumePrincipalArray(ScalarObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
self._times = None
def get_headers(self) -> List[str]:
headers = [
'lxa', 'lxb', 'lxc',
'lya', 'lyb', 'lyc',
'lza', 'lzb', 'lzc',
'sa', 'sb', 'sc',
'epr', 'ovm']
return headers
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for inid, nid in enumerate(self.node):
t1 = self.data[itime, inid, :]
t2 = table.data[itime, inid, :]
(lxa1, lxb1, lxc1, lya1, lyb1, lyc1, lza1, lzb1, lzc1, sa1, sb1, sc1, epr1, ovm1) = t1
(lxa2, lxb2, lxc2, lya2, lyb2, lyc2, lza2, lzb2, lzc2, sa2, sb2, sc2, epr2, ovm2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s)\n' % (
nid,
lxa1, lxb1, lxc1, lya1, lyb1, lyc1, lza1,
lxa2, lxb2, lxc2, lya2, lyb2, lyc2, lza2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
#print('self.IDs', self.data)
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
self.nelements //= self.ntimes
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self.node = np.zeros(self.ntotal, dtype=idtype)
#lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm
self.data = np.zeros((self.ntimes, self.ntotal, 14), dtype=fdtype)
self.location = np.empty(self.ntotal, dtype='U8')
self._times = np.zeros(self.ntimes, dtype=dtype)
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node.shape = {self.node.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def add_sort1(self, dt, nid, lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm):
assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)
self._times[self.itime] = dt
self.node[self.itotal] = nid
self.data[self.itime, self.itotal, :] = [lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm]
self.itotal += 1
#def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
#page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
#pass
class GridPointStressesVolumeDirectArray(ScalarObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
self._times = None
def get_headers(self) -> List[str]:
headers = ['ox', 'oy', 'oz', 'txy', 'tyz', 'txz', 'pressure', 'ovm']
return headers
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
#print('self.IDs', self.data)
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
self.nelements //= self.ntimes
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self.node = np.zeros(self.ntotal, dtype=idtype)
#oxx, oyy, txy, angle, major, minor, ovm
self.data = np.zeros((self.ntimes, self.ntotal, 8), dtype=fdtype)
self.location = np.empty(self.ntotal, dtype='U8')
self._times = np.zeros(self.ntimes, dtype=dtype)
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node.shape = {self.node.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def add_sort1(self, dt, nid, nx, ny, nz, txy, tyz, txz, pressure, ovm):
assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)
self._times[self.itime] = dt
self.node[self.itotal] = nid
self.data[self.itime, self.itotal, :] = [nx, ny, nz, txy, tyz, txz, pressure, ovm]
self.itotal += 1
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
"""
' D I R E C T S T R E S S E S A T G R I D P O I N T S - - V O L U M E 101'
' OUTPUT COORDINATE SYSTEM = 0 BASIC '
' GRID NORMAL-X NORMAL-Y NORMAL-Z SHEAR-XY SHEAR-YZ SHEAR-ZX MEAN VON MISES'
' ID PRESSURE'
' 1 1.455E+03 -1.548E+02 -2.927E+02 -1.573E+01 3.326E+01 -3.438E+03 -3.357E+02 6.188E+03'
' 2 1.093E+03 -1.996E+02 -1.682E+02 1.542E+02 5.962E+01 -4.104E+03 -2.417E+02 7.227E+03'
"""
if header is None:
header = []
cid = self.refid
#axis_int = self.oCoord
#axis_map = {0 : 'X', 1 : 'Y', 2 : 'Z'}
#axis = axis_map[axis_int]
msg = [
' D I R E C T S T R E S S E S A T G R I D P O I N T S - - V O L U M E %3i\n'
' OUTPUT COORDINATE SYSTEM = %7i ELEMENT \n'
' GRID NORMAL-X NORMAL-Y NORMAL-Z SHEAR-XY SHEAR-YZ SHEAR-ZX MEAN VON MISES\n'
' ID PRESSURE\n' % (
#' 8086 6.136E-02 2.131E-01 8.353E-02 -2.268E+00 -2.274E-13 1.525E-13 -1.193E-01 3.930E+00'
self.ogs_id, cid)
]
ntimes = self.data.shape[0]
nids = self.node
zero = ' '
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
nx = self.data[itime, :, 0]
ny = self.data[itime, :, 1]
nz = self.data[itime, :, 2]
txy = self.data[itime, :, 3]
tyz = self.data[itime, :, 4]
txz = self.data[itime, :, 5]
pressure = self.data[itime, :, 6]
ovm = self.data[itime, :, 7]
for (nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi) in zip(
nids, nx, ny, nz, txy, tyz, txz, pressure, ovm):
[nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi] = write_floats_10e([
nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi])
f06_file.write('%s%8s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-s\n' % (
zero, nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi.rstrip()))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for inid, nid in enumerate(self.node):
t1 = self.data[itime, inid, :]
t2 = table.data[itime, inid, :]
(nx1, ny1, nz1, txy1, tyz1, txz1, pressure1, ovm1) = t1
(nx2, ny2, nz2, txy2, tyz2, txz2, pressure2, ovm2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
nid,
nx1, ny1, nz1, txy1, tyz1, txz1, pressure1, ovm1,
nx2, ny2, nz2, txy2, tyz2, txz2, pressure2, ovm2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
#msg = [
#' P R I N C I P A L G R I D P O I N T S T R E S S D I S C O N T I N U I T I E S - - V O L U M E %s\n'
#' OUTPUT COORDINATE SYSTEM = %7i ELEMENT \n'
#' GRID PRINCIPAL STRESS DISCONTINUITY MEAN VON MISES ERROR\n'
#' ID A B C PRESSURE EST.\n' % (
#ivolume, cid)
#' 8086 5.448E-09 9.886E-08 2.026E-15 2.484E-09 1.086E-07 5.716E-08'
#]
# not sure what result this is for
#zero = ' '
#f06_file.write('%s%8s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-s\n' % (
#zero, nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi.rstrip()))
GridPointStressesVolumeDiscontinutiesArray = None # tCode=34
class GridPointStressesSurfaceDiscontinutiesArray(ScalarObject): # tCode=35
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
self.ntotal = 0
self.ntimes = 0
self.nelements = 0
self.itotal = 0
self.ielement = 0
self.data = None
self.itime = None
#self.node_element = None
self._times = None
def get_headers(self) -> List[str]:
headers = ['oxx', 'oyy', 'ozz', 'txy', 'pressure']
return headers
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def build(self):
"""sizes the vectorized attributes of the GridPointStressesArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
#print('self.IDs', self.data)
self.itime = 0
self.ielement = 0
self.itotal = 0
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.node = np.zeros(self.ntotal, dtype='int32')
#oxx, oyy, ozz, txy, pressure
self.data = np.zeros((self.ntimes, self.ntotal, 5), dtype='float32')
self.location = np.empty(self.ntotal, dtype='U8')
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
self._times = np.zeros(self.ntimes, dtype=dtype)
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' node.shape = {self.node.shape}\n')
msg.append(f' location.shape = {self.location.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg += self.get_data_code()
return msg
def add_sort1(self, dt, nid, oxx, oyy, ozz, txy, pressure):
assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)
self._times[self.itime] = dt
self.node[self.itotal] = nid
self.data[self.itime, self.itotal, :] = [oxx, oyy, ozz, txy, pressure]
self.itotal += 1
class GridPointStrainsVolumePrincipalArray(GridPointStressesVolumePrincipalArray):
pass
class GridPointStrainsVolumeDirectArray(GridPointStressesVolumeDirectArray):
pass
GridPointStrainsVolumeDiscontinutiesArray = None
class GridPointStrainsSurfaceDiscontinutiesArray(GridPointStressesSurfaceDiscontinutiesArray):
pass | en | 0.26197 | ' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E 5\n', '0 SURFACE X-AXIS X NORMAL(Z-AXIS) Z REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID 0\n', ' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n', ' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\n'] '0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0' ' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0' ' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0' sizes the vectorized attributes of the GridPointStressesArray #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal)) #self.names = [] #oxx, oyy, txy, angle, major, minor, ovm #, itable=-3, itime=0): #if itable == -3: #print('*writing itable=%s' % itable) #op2_file.write(pack('12i', *header)) #else: #print('***writing itable=%s' % itable) #op2_file.write(pack('3i', *[ ##4, itable, 4, ##4, 1, 4, ##4, 0, 4, #4, 146, 4, #])) #[ #'aCode', 'tCode', 'element_type', 'isubcase', #'???', '???', '???', 'load_set' #'format_code', 'num_wide', 's_code', '???', #'???', '???', '???', '???', #'???', '???', '???', '???', #'???', '???', '???', '???', #'???', 'Title', 'subtitle', 'label'] #random_code = self.random_code #print(''.join(self.get_stats())) # poor sort2 -> sort1 #field5 = 1 # field 6 # field 7 #elif self.analysis_code == 3: #field5 = self.freqs[itime] # field 5 # field 5 # pre-buckling # load set number # post-buckling # load set number #if hasattr(self, 'eigns'): # pragma: no cover # field 6 # complex eigenvalues # pragma: no cover # field 6 # field 7 # nonlinear statics # field 5; load step # old geometric nonlinear statics # load set number #self.ogs = self.add_data_parameter(data, 'ogs_id', b'i', 3, False) #self.refid = self.add_data_parameter(data, 'refid', b'i', 8, False) #self.format_code = self.add_data_parameter(data, 'format_code', b'i', 9, False) #self.num_wide = self.add_data_parameter(data, 'num_wide', b'i', 10, False) #self.sCode = self.add_data_parameter(data, 'sCode', b'i', 11, False) #self.oCoord = self.add_data_parameter(data, 'oCoord', b'i', 12, False) #self.axis = self.add_data_parameter(data, 'axis', b'i', 13, False) #self.normal = self.add_data_parameter(data, 'normal', b'i', 14, False) #print('write_table_3', i, v) #print(fmt) #print(data) #f.write(pack(fascii, '%s header 3c' % self.table_name, fmt, data)) #def build_dataframe(self): #"""creates a pandas dataframe""" #import pandas as pd #headers = self.get_headers() #element_node = [self.element_node[:, 0], self.element_node[:, 1]] #if self.nonlinear_factor not in (None, np.nan): #column_names, column_values = self._build_dataframe_transient_header() #self.data_frame = pd.Panel(self.data, items=column_values, major_axis=element_node, minor_axis=headers).to_frame() #self.data_frame.columns.names = column_names #else: #self.data_frame = pd.Panel(self.data, major_axis=element_node, minor_axis=headers).to_frame() #self.data_frame.columns.names = ['Static'] #self.data_frame.index.names = ['NodeID', 'ElementID', 'Item'] unvectorized method for adding SORT1 transient data #assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid) # transient writes an OP2 #print('***************', itable) #if isinstance(self.nonlinear_factor, float): #op2_format = '%sif' % (7 * self.ntimes) #raise NotImplementedError() #else: #op2_format = 'i21f' #s = Struct(op2_format) #eids2 = self.element_node[:, 0] #nodes = self.element_node[:, 1] #nelements_nodes = len(nodes) #eids3 = 
self.element_cid[:, 0] #cids3 = self.element_cid[:, 1] # table 4 info #ntimes = self.data.shape[0] #nnodes = self.data.shape[1] #nelements = len(np.unique(eids2)) # 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm #ntotal = ((nnodes * 21) + 1) + (nelements * 4) #nnodes_centroid = self.nnodes_per_element #nnodes_no_centroid = self.nnodes_per_element_no_centroid #ntotali = 11 #print('shape = %s' % str(self.data.shape)) #assert nnodes > 1, nnodes #assert self.ntimes == 1, self.ntimes #print('ntotal=%s' % (ntotal)) #op2_format = endian + b'2i6f' #idtype = self.element_cid.dtype #print(self.size) #[nids, eids, fibers, nx, ny, txy, angle, majorp, minorp, tmax, ovm] # speed up transient cases, but slightly slows down static cases # setting: # - [nid_device, eids, location_bytes] #nx = self.data[itime, :, 0] #ny = self.data[itime, :, 1] #txy = self.data[itime, :, 2] #angle = self.data[itime, :, 3] #majorp = self.data[itime, :, 4] #minorp = self.data[itime, :, 5] #tmax = self.data[itime, :, 6] #ovm = self.data[itime, :, 7] #fibers = self.location #cen_array = np.full(nelements, grid_bytes, dtype='|S4') #nnodes_no_centroid_array = np.full(nelements, nnodes_no_centroid, dtype=idtype) #element_wise_data = to_column_bytes([ #element_device, # ints #cids3, # ints #cen_array, # bytes #nnodes_no_centroid_array, # ints #], fdtype, debug=False) # we could tack the nodes on, so we don't have to keep stacking it # but we run into issues with datai # # total=nelements_nodes #nodes_view = nodes.view(fdtype).reshape(nelements, nnodes_centroid) #inode = np.arange(nnodes_centroid) #data_out[:, 4+inode*21] = nodes_view[:, inode] # record 4 #print('stress itable = %s' % itable) # stack each output by columns and fix any dtypes #datai2 = datai.reshape(nelements, 21*nnodes_centroid) #data_out = np.hstack([element_wise_data, datai2]) #data_out[:, 4:] = datai2 # switch datai to element format and put it in the output buffer # pragma: no cover #if not np.array_equal(t1, t2): #'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0' #' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0' #' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0' #f' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\n', #' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \n', #'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0' #' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0' #' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0' # pragma: no cover #if not np.array_equal(t1, t2): sizes the vectorized attributes of the GridPointStressesArray #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal)) #print('self.IDs', self.data) #lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm # transient #def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', #page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True): #pass sizes the vectorized attributes of the GridPointStressesArray #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal)) #print('self.IDs', self.data) #oxx, oyy, txy, angle, major, minor, ovm # transient ' D I R E C T S T R E S S E S A T G R I D P O I N T S - - V O L U M E 101' ' OUTPUT COORDINATE SYSTEM = 0 BASIC ' ' GRID NORMAL-X NORMAL-Y NORMAL-Z SHEAR-XY SHEAR-YZ SHEAR-ZX MEAN VON MISES' ' ID PRESSURE' ' 1 1.455E+03 -1.548E+02 -2.927E+02 -1.573E+01 3.326E+01 -3.438E+03 -3.357E+02 6.188E+03' ' 2 1.093E+03 -1.996E+02 -1.682E+02 1.542E+02 5.962E+01 
-4.104E+03 -2.417E+02 7.227E+03' #axis_int = self.oCoord #axis_map = {0 : 'X', 1 : 'Y', 2 : 'Z'} #axis = axis_map[axis_int] #' 8086 6.136E-02 2.131E-01 8.353E-02 -2.268E+00 -2.274E-13 1.525E-13 -1.193E-01 3.930E+00' # pragma: no cover #if not np.array_equal(t1, t2): #msg = [ #' P R I N C I P A L G R I D P O I N T S T R E S S D I S C O N T I N U I T I E S - - V O L U M E %s\n' #' OUTPUT COORDINATE SYSTEM = %7i ELEMENT \n' #' GRID PRINCIPAL STRESS DISCONTINUITY MEAN VON MISES ERROR\n' #' ID A B C PRESSURE EST.\n' % ( #ivolume, cid) #' 8086 5.448E-09 9.886E-08 2.026E-15 2.484E-09 1.086E-07 5.716E-08' #] # not sure what result this is for #zero = ' ' #f06_file.write('%s%8s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-s\n' % ( #zero, nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi.rstrip())) # tCode=34 # tCode=35 #self.node_element = None sizes the vectorized attributes of the GridPointStressesArray #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal)) #print('self.IDs', self.data) #self.names = [] #oxx, oyy, ozz, txy, pressure # transient | 2.184997 | 2 |
three_wolves/deep_whole_body_controller/utility/reward_utils.py | 42jaylonw/rrc_2021_three_wolves | 0 | 6630365 | import numpy as np
def ComputeDist(p0, p1):
return np.linalg.norm(np.subtract(p1, p0))
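# FVCap: clamp the reward term r to the symmetric range [-v_cap, v_cap].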
def FVCap(v_cap, r):
return max(-v_cap, min(r, v_cap))
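# ComputeAcc: finite-difference acceleration magnitude from three consecutive
# 3D positions sampled `time_step` apart (speed over the second pair minus
# speed over the first pair, divided by time_step).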
def ComputeAcc(pos_3, time_step=0.1):
assert pos_3.shape == (3, 3)
vel_0 = ComputeDist(pos_3[0], pos_3[1]) / time_step
vel_1 = ComputeDist(pos_3[1], pos_3[2]) / time_step
acc_3 = (vel_1 - vel_0) / time_step
return acc_3
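# ExpSqr: exponential-of-squared-error kernel, sum(exp(wei * |tar - cur|^2));
# wei must be negative so the value decays toward 0 as cur moves away from tar.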
def ExpSqr(cur, tar=0, wei=-3):
assert wei < 0
return np.sum(np.exp(wei * np.square(np.abs(tar - cur))))
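# Delta: total absolute deviation of a 1-D sequence from its mean, presumably
# used as a smoothness/consistency penalty on a short history of values.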
def Delta(seq):
seq = np.array(seq)
assert seq.ndim == 1
_diff = seq - np.mean(seq)
return np.sum(np.abs(_diff))
| import numpy as np
def ComputeDist(p0, p1):
return np.linalg.norm(np.subtract(p1, p0))
def FVCap(v_cap, r):
return max(-v_cap, min(r, v_cap))
def ComputeAcc(pos_3, time_step=0.1):
assert pos_3.shape == (3, 3)
vel_0 = ComputeDist(pos_3[0], pos_3[1]) / time_step
vel_1 = ComputeDist(pos_3[1], pos_3[2]) / time_step
acc_3 = (vel_1 - vel_0) / time_step
return acc_3
def ExpSqr(cur, tar=0, wei=-3):
assert wei < 0
return np.sum(np.exp(wei * np.square(np.abs(tar - cur))))
def Delta(seq):
seq = np.array(seq)
assert seq.ndim == 1
_diff = seq - np.mean(seq)
return np.sum(np.abs(_diff))
| none | 1 | 2.319471 | 2 |
|
analysis_scripts/wga_norm_and_thresh.py | SpeerLab/STORM-UI- | 1 | 6630366 | <filename>analysis_scripts/wga_norm_and_thresh.py
import sys
import glob
import os
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.interpolate import make_interp_spline,BSpline
from scipy.stats import zscore
from scipy.interpolate import UnivariateSpline
from skimage import transform
from PIL import Image
from imageio import imwrite
import yaml
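# cal_hist: read one TIFF section and histogram its pixel values into 255 bins
# over [1, 255], i.e. ignoring the zero-valued background. The `num_images`
# argument is currently unused.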
def cal_hist(wgafile,num_images):
print(wgafile)
A = mpimg.imread(wgafile)
hist,bins = np.histogram(A.ravel(),255,[1,255])
return hist
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template histogram; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
# template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values = np.arange(0, template.shape[0])
t_counts = template
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
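# Usage sketch (hypothetical arrays, kept as comments only):
# src = np.random.randint(0, 255, (64, 64)) # image whose histogram will be remapped
# template_hist = np.ones(255) # flat 255-bin target histogram
# matched = hist_match(src, template_hist) # same shape as src, values redistributed to follow template_hist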
def wga_norm_and_thresh(exp_folder, alignment_channel):
# Assert alignment_channel is correct
assert alignment_channel in [561, 488]
print('Loading in analysis config')
# Read in parameters from yaml file
with open('./configs/bead_analysis_params.yml') as f:
config = yaml.safe_load(f) # yaml.load() without an explicit Loader is deprecated/removed in newer PyYAML
shape = (config['shape_h'], config['shape_w'])
exp_folder = os.path.normpath(exp_folder) + "\\"
storm_merged_path = exp_folder + 'unaligned\\storm_merged\\'
conv_align_path = exp_folder + 'unaligned\\conv_{}\\'.format(str(alignment_channel))
storm_merged_files = glob.glob(storm_merged_path + '*.tif')
num_merged_images = len(storm_merged_files)
wga_files = glob.glob(conv_align_path + '*.tif')
num_wga_images = len(wga_files)
assert num_merged_images == num_wga_images, "Number of images must match!"
num_images = num_merged_images
hy3c = np.zeros((num_images, 255))
hy4c = np.zeros((num_images, 255))
hy3cb = np.zeros((num_images, 255))
hy4cb = np.zeros((num_images, 255))
print('Calculating histograms!')
print(num_images)
for i in range(num_images):
hy3c[i] = cal_hist(storm_merged_files[i], num_images) # storm_merged
hy4c[i] = cal_hist(wga_files[i], num_images) # conv_561
# Normalizing counts to 0-1 range
hy3cb = hy3c / hy3c.sum(axis=1, keepdims=True)
hy4cb = hy4c / hy4c.sum(axis=1, keepdims=True)
chan = hy4cb
varuse4 = np.zeros([num_images, 255])
x_hist = np.arange(1,255)
x_sections = np.arange(0, num_images)
print('Thresholding!!')
for i in range(255):
zthresh = 3
curr_param = chan[:, i] # Distribution of channel i values across all images
mean = np.mean(curr_param, axis=0)
sd = np.std(curr_param, axis=0)
distance_from_mean = abs(chan[:, i] - mean)
mask = distance_from_mean < zthresh * sd
# Select which sections can be used for smooth interpolation
currfitx = x_sections[mask]
currfity = curr_param[mask]
# currfitx = (currfitx - np.mean(currfitx)) / (np.std(currfitx) + 0.00001)
# currfity = (currfity - np.mean(currfity)) / (np.std(currfity) + 0.00001)
spl = UnivariateSpline(currfitx, currfity)
spl.set_smoothing_factor(0.9)
varuse4[:, i] = spl(np.arange(0,num_images))
path4 = exp_folder + 'unaligned\\for_align\\'
path4a = exp_folder + 'unaligned\\for_align_ds\\'
print('Saving out new images!')
if not os.path.exists(path4):
os.mkdir(path4)
if not os.path.exists(path4a):
os.mkdir(path4a)
for i in range(num_images):
hgram4 = varuse4[i] / sum(varuse4[i]) # Normalize over the channels for each image
# Read in the storm file
A = mpimg.imread(wga_files[i])
hist,bins = np.histogram(A.ravel(),256,[0,255])
hist_cum = np.cumsum(hist)
a = np.array(hist[0])
b = hgram4*(sum(hist)-hist[0])
hgram4a = np.concatenate((a,b),axis=None)
out = hist_match(A, hgram4a)
#Change: debug part commented out !!!!!!!!!!!!!!!!!!!
#import pdb; pdb.set_trace()
#out[A < 1] = 0
out_align = out
out_align_small = transform.rescale(out_align, 0.1)
imwrite(path4 + wga_files[i].split('\\')[-1], out_align)
imwrite(path4a + wga_files[i].split('\\')[-1], out_align_small)
print('Done!')
return True
| <filename>analysis_scripts/wga_norm_and_thresh.py
import sys
import glob
import os
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.interpolate import make_interp_spline,BSpline
from scipy.stats import zscore
from scipy.interpolate import UnivariateSpline
from skimage import transform
from PIL import Image
from imageio import imwrite
import yaml
def cal_hist(wgafile,num_images):
print(wgafile)
A = mpimg.imread(wgafile)
hist,bins = np.histogram(A.ravel(),255,[1,255])
return hist
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template histogram; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
# template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values = np.arange(0, template.shape[0])
t_counts = template
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
def wga_norm_and_thresh(exp_folder, alignment_channel):
# Assert alignment_channel is correct
assert alignment_channel in [561, 488]
print('Loading in analysis config')
# Read in parameters from yaml file
with open('./configs/bead_analysis_params.yml') as f:
config = yaml.safe_load(f) # yaml.load() without an explicit Loader is deprecated/removed in newer PyYAML
shape = (config['shape_h'], config['shape_w'])
exp_folder = os.path.normpath(exp_folder) + "\\"
storm_merged_path = exp_folder + 'unaligned\\storm_merged\\'
conv_align_path = exp_folder + 'unaligned\\conv_{}\\'.format(str(alignment_channel))
storm_merged_files = glob.glob(storm_merged_path + '*.tif')
num_merged_images = len(storm_merged_files)
wga_files = glob.glob(conv_align_path + '*.tif')
num_wga_images = len(wga_files)
assert num_merged_images == num_wga_images, "Number of images must match!"
num_images = num_merged_images
hy3c = np.zeros((num_images, 255))
hy4c = np.zeros((num_images, 255))
hy3cb = np.zeros((num_images, 255))
hy4cb = np.zeros((num_images, 255))
print('Calculating histograms!')
print(num_images)
for i in range(num_images):
hy3c[i] = cal_hist(storm_merged_files[i], num_images) # storm_merged
hy4c[i] = cal_hist(wga_files[i], num_images) # conv_561
# Normalizing counts to 0-1 range
hy3cb = hy3c / hy3c.sum(axis=1, keepdims=True)
hy4cb = hy4c / hy4c.sum(axis=1, keepdims=True)
chan = hy4cb
varuse4 = np.zeros([num_images, 255])
x_hist = np.arange(1,255)
x_sections = np.arange(0, num_images)
print('Thresholding!!')
for i in range(255):
zthresh = 3
curr_param = chan[:, i] # Distribution of channel i values across all images
mean = np.mean(curr_param, axis=0)
sd = np.std(curr_param, axis=0)
distance_from_mean = abs(chan[:, i] - mean)
mask = distance_from_mean < zthresh * sd
# Select which sections can be used for smooth interpolation
currfitx = x_sections[mask]
currfity = curr_param[mask]
# currfitx = (currfitx - np.mean(currfitx)) / (np.std(currfitx) + 0.00001)
# currfity = (currfity - np.mean(currfity)) / (np.std(currfity) + 0.00001)
spl = UnivariateSpline(currfitx, currfity)
spl.set_smoothing_factor(0.9)
varuse4[:, i] = spl(np.arange(0,num_images))
path4 = exp_folder + 'unaligned\\for_align\\'
path4a = exp_folder + 'unaligned\\for_align_ds\\'
print('Saving out new images!')
if not os.path.exists(path4):
os.mkdir(path4)
if not os.path.exists(path4a):
os.mkdir(path4a)
for i in range(num_images):
hgram4 = varuse4[i] / sum(varuse4[i]) # Normalize over the channels for each image
# Read in the storm file
A = mpimg.imread(wga_files[i])
hist,bins = np.histogram(A.ravel(),256,[0,255])
hist_cum = np.cumsum(hist)
a = np.array(hist[0])
b = hgram4*(sum(hist)-hist[0])
hgram4a = np.concatenate((a,b),axis=None)
out = hist_match(A, hgram4a)
#Change: debug part commented out !!!!!!!!!!!!!!!!!!!
#import pdb; pdb.set_trace()
#out[A < 1] = 0
out_align = out
out_align_small = transform.rescale(out_align, 0.1)
imwrite(path4 + wga_files[i].split('\\')[-1], out_align)
imwrite(path4a + wga_files[i].split('\\')[-1], out_align_small)
print('Done!')
return True
| en | 0.753595 | Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template histogram; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image # template = template.ravel() # get the set of unique pixel values and their corresponding indices and # counts # take the cumsum of the counts and normalize by the number of pixels to # get the empirical cumulative distribution functions for the source and # template images (maps pixel value --> quantile) # interpolate linearly to find the pixel values in the template image # that correspond most closely to the quantiles in the source image # Assert alignment_channel is correct # Read in parameters from yaml file # storm_merged # conv_561 # Normalizing counts to 0-1 range # Distribution of channel i values across all images # Select which sections can be used for smooth interpolation # currfitx = (currfitx - np.mean(currfitx)) / (np.std(currfitx) + 0.00001) # currfity = (currfity - np.mean(currfity)) / (np.std(currfity) + 0.00001) # Normalize over the channels for each image # Read in the storm file #Change: debug part commented out !!!!!!!!!!!!!!!!!!! #import pdb; pdb.set_trace() #out[A < 1] = 0 | 2.483359 | 2 |
examples/codelab/generate_customer_journeys.py | miracvbasaran/PipelineDP | 0 | 6630367 | """Generate synthetic customer journeys for PipelineDP codelab."""
from absl import app
from absl import flags
import enum
from typing import Optional
import numpy as np
import pandas as pd
FLAGS = flags.FLAGS
flags.DEFINE_integer('n_customers', 100, 'The number of customers to simulate.')
flags.DEFINE_float('conversion_rate', .2, 'Conversion rate to simulate.')
flags.DEFINE_integer('random_seed', None, 'Random seed to use for simulations.')
class AvailableProducts(enum.IntEnum):
"""Class of available products."""
JUMPER = 1
T_SHIRT = 2
SOCKS = 3
JEANS = 4
_MINIMUM_PRICE = {'jumper': 40, 't_shirt': 20, 'socks': 5, 'jeans': 70}
class Product:
"""Class of products that can be viewed throughout a customer journey."""
def __init__(self, product: AvailableProducts):
self.name = product.name.lower()
if self.name not in _MINIMUM_PRICE.keys():
raise ValueError(
f"{self.name} needs to be one of {_MINIMUM_PRICE.keys()}")
self.minimum_price = _MINIMUM_PRICE[self.name]
def cost(self,
random_generator: Optional[np.random.Generator] = None) -> float:
if not random_generator:
random_generator = np.random.default_rng()
return self.minimum_price + abs(np.round(random_generator.normal(), 2))
def create_customer_journeys(
n_samples: int = 100,
conversion_rate: float = .3,
product_view_rate: float = .6,
max_product_view: int = 5,
random_generator: Optional[np.random.Generator] = None) -> pd.DataFrame:
"""Creates synthetic data of customer product views and conversions.
Args:
n_samples: Number of samples to be generated.
conversion_rate: Assumed conversion rate, i.e. probability that customer
makes a purchase. Needs to be between 0-1.
product_view_rate: Assumed probability that customer views a product. Needs
to be between 0-1.
max_product_view: Upper limit of possible product views. Needs to be >0. The
expected number of viewed products is product_view_rate *
max_product_view. For instance, if product_view_rate is .50 and
max_product_view is 4, a customer will on average view two products.
random_generator: Random generator that can be passed to make outputs
reproducible.
Returns:
DataFrame of synthetic data.
Raises:
UserWarning: if either conversion_rate or product_view_rate is 0.
ValueError: if max_product_view is 0.
"""
all_customer_journeys = []
if conversion_rate == 0 or product_view_rate == 0:
raise UserWarning(
'Setting conversion_rate or product_view_rate to 0 implies that no conversions can occur.'
)
if max_product_view <= 0:
raise ValueError(
f'max_product_view needs to be larger than 0, but is {max_product_view}')
if not random_generator:
random_generator = np.random.default_rng()
for _ in range(n_samples):
n_products_viewed = np.sum(
random_generator.binomial(1,
p=product_view_rate,
size=max_product_view))
which_products_viewed = random_generator.integers(
1, len(list(AvailableProducts)) + 1, size=n_products_viewed)
is_conversion = random_generator.binomial(1, p=conversion_rate)
products_viewed = {}
basket_value = 0
for index, product_id in enumerate(which_products_viewed):
product = Product(product=AvailableProducts(product_id))
products_viewed[f'product_view_{index}'] = product.name
if is_conversion:
basket_value += product.cost(random_generator=random_generator)
products_viewed['conversion_value'] = basket_value
all_customer_journeys.append(products_viewed)
data = pd.DataFrame(all_customer_journeys)
data.replace({'t_shirt': 't-shirt'}, inplace=True)
return data.reindex(sorted(data.columns), axis=1)
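# Usage sketch (hypothetical call, mirroring main() below):
# rng = np.random.default_rng(0)
# journeys = create_customer_journeys(n_samples=10, conversion_rate=.2, random_generator=rng)
# -> one row per simulated customer, with product_view_* columns and a conversion_value.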
def main(unused_argv):
rng = np.random.default_rng(FLAGS.random_seed)
df = create_customer_journeys(FLAGS.n_customers,
conversion_rate=FLAGS.conversion_rate,
random_generator=rng)
df['has_conversion'] = (df['conversion_value'] > 0)
df['user_id'] = df.index.values
df.dropna(subset=['product_view_0'], inplace=True)
df.fillna('none', inplace=True)
df.to_csv('synthetic_customer_journeys.csv')
if __name__ == '__main__':
app.run(main)
| """Generate synthetic customer journeys for PipelineDP codelab."""
from absl import app
from absl import flags
import enum
from typing import Optional
import numpy as np
import pandas as pd
FLAGS = flags.FLAGS
flags.DEFINE_integer('n_customers', 100, 'The number of customers to simulate.')
flags.DEFINE_float('conversion_rate', .2, 'Conversion rate to simulate.')
flags.DEFINE_integer('random_seed', None, 'Random seed to use for simulations.')
class AvailableProducts(enum.IntEnum):
"""Class of available products."""
JUMPER = 1
T_SHIRT = 2
SOCKS = 3
JEANS = 4
_MINIMUM_PRICE = {'jumper': 40, 't_shirt': 20, 'socks': 5, 'jeans': 70}
class Product:
"""Class of products that can be viewed throughout a customer journey."""
def __init__(self, product: AvailableProducts):
self.name = product.name.lower()
if self.name not in _MINIMUM_PRICE.keys():
raise ValueError(
f"{self.name} needs to be one of {_MINIMUM_PRICE.keys()}")
self.minimum_price = _MINIMUM_PRICE[self.name]
def cost(self,
random_generator: Optional[np.random.Generator] = None) -> float:
if not random_generator:
random_generator = np.random.default_rng()
return self.minimum_price + abs(np.round(random_generator.normal(), 2))
def create_customer_journeys(
n_samples: int = 100,
conversion_rate: float = .3,
product_view_rate: float = .6,
max_product_view: int = 5,
random_generator: Optional[np.random.Generator] = None) -> pd.DataFrame:
"""Creates synthetic data of customer product views and conversions.
Args:
n_samples: Number of samples to be generated.
conversion_rate: Assumed conversion rate, i.e. probability that customer
makes a purchase. Needs to be between 0-1.
product_view_rate: Assumed probability that customer views a product. Needs
to be between 0-1.
max_product_view: Upper limit of possible product views. Needs to be >0. The
expected number of viewed products is product_view_rate *
max_product_view. For instance, if product_view_rate is .50 and
max_product_view is 4, a customer will on average view two products.
random_generator: Random generator that can be passed to make outputs
reproducible.
Returns:
DataFrame of synthetic data.
Raises:
UserWarning: if either conversion_rate or product_view_rate is 0.
ValueError: if max_product_view is 0.
"""
all_customer_journeys = []
if conversion_rate == 0 or product_view_rate == 0:
raise UserWarning(
'Setting conversion_rate or product_view_rate to 0 implies that no conversions can occur.'
)
if max_product_view <= 0:
raise ValueError(
f'max_product_view needs to be larger than 0, but is {max_product_view}')
if not random_generator:
random_generator = np.random.default_rng()
for _ in range(n_samples):
n_products_viewed = np.sum(
random_generator.binomial(1,
p=product_view_rate,
size=max_product_view))
which_products_viewed = random_generator.integers(
1, len(list(AvailableProducts)) + 1, size=n_products_viewed)
is_conversion = random_generator.binomial(1, p=conversion_rate)
products_viewed = {}
basket_value = 0
for index, product_id in enumerate(which_products_viewed):
product = Product(product=AvailableProducts(product_id))
products_viewed[f'product_view_{index}'] = product.name
if is_conversion:
basket_value += product.cost(random_generator=random_generator)
products_viewed['conversion_value'] = basket_value
all_customer_journeys.append(products_viewed)
data = pd.DataFrame(all_customer_journeys)
data.replace({'t_shirt': 't-shirt'}, inplace=True)
return data.reindex(sorted(data.columns), axis=1)
def main(unused_argv):
rng = np.random.default_rng(FLAGS.random_seed)
df = create_customer_journeys(FLAGS.n_customers,
conversion_rate=FLAGS.conversion_rate,
random_generator=rng)
df['has_conversion'] = (df['conversion_value'] > 0)
df['user_id'] = df.index.values
df.dropna(subset=['product_view_0'], inplace=True)
df.fillna('none', inplace=True)
df.to_csv('synthetic_customer_journeys.csv')
if __name__ == '__main__':
app.run(main)
| en | 0.834285 | Generate synthetic customer journeys for PipelineDP codelab. Class of available products. Class of products that can be viewed throughout a customer journey. Creates synthetic data of customer product views and conversions. Args: n_samples: Number of samples to be generated. conversion_rate: Assumed conversion rate, i.e. probability that customer makes a purchase. Needs to be between 0-1. product_view_rate: Assumed probability that customer views a product. Needs to be between 0-1. max_product_view: Upper limit of possible product views. Needs to be >0. The expected number of viewed products is product_view_rate * max_product_view. For instance, if product_view_rate is .50 and max_product_view is 4, a customer will on minimum view two products. random_generator: Random generator that can be passed to make outputs reproducible. Returns: DataFrame of synthetic data. Raises: UserWarning: if either conversion_rate or product_view_rate is 0. ValueError: if max_product_view is 0. | 3.162275 | 3 |
django/docs/releases/1.4.14.txt.py | roshanba/mangal | 0 | 6630368 | XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXX XXXXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXX XXXXX
XXXXXX XXXXXX XXXXX XXXXXXX XXXXXXXX XXXXXX XX XXXXXXX
XXXXXXXXXXXXX XXXXX XXXXXXXX XXXX XXXXXXXX XX XXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXX XXX XXXXXXXXX XXXXX XXXXXXXX XXXXXXXXXXXXXXX XXXX XXXXX
XXXXXXXX XXXX XXX XXXXXXXXX XXXXX XXXXX XXXXXXXXXXXX XXXXXXXX X XXXX XX X
XXXXXXXXX XXXXX XX XXXXXXXX XXXXX XXXXXXX XXXXX XXX XXXXXXXX XX XXXXXXXXXXX
XXXXX XX X XXXXXXXX XXXX XXXXXXXX XX XXX XXX XXXXXX XXXXXXXXXX
XX XXXXXX XXXXX XXX XXXXXXXXX XXX XXXXXXX XXXX XX XXX XXXXXX XXXX XXX XXXXXXX
XXXXX XXXXXXXXX XXX XXXXXX XXXXX XXXX XXX XXX XXXXXXX XXXXXXXXXXX XXXXXX XXXX
XXXXXXXX XXXXXXX XXXX XXXXXXXXX XXXX XXX XXXXX XXXXX XXXXXX XXX XXX XXXXXXXX XX
XXX XXXXXX XXX XXX XX XXX XXXXXXX
XXXX XXXXXX XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXX XXXXXXXX XXXXXXXX XXXX XXXXXX XXXXXXX XX XXX XXXXXXX XXXXXXXXXXXXX
XXX XXXXXXX XX XXXXXXXXX X XXXX XXXXXX XX XXXXXXXXXXXXX XXXXXX XXXXX XXXX X
XXXXXXXXX XXXXXXXX XX XXXXXXXXX XXXXX XXXXXXXXXX XXX XXXXXX XXX XXXX XXX XXXXXXX
X XXXX XXXXXXXXXXXXXX XXXXXXXX XXXX XXXXXX XXXXXXX XXXX XXXXX XXX XXX XXXXXX XX
XXXX XXXXX XXXXXX XXXXX X XXXX XXXX XXX XXXXXXX XX XXXXXX XXXXX XXX XXXXX XXXX
XXXXXXXXXXX XX XXX XXXXXX XXXXXXXX XXXXXXXXXX XXXXXXX XX XX XXXXXX XXXX XXXX
XXXXXX XX XXXXXXXXX XXXXXX XXXXXX XX XXXX XXXXXX XXXX X XXXX XXXXXXX XXXXXXXXXX
XXX XXX XXXX XXXXXXXX XXXXX XX XXX XXXX XX XXXXXXXXX XX XXXX X XXXX XXXXXXXXXXXX
XXXXX XXXXXXXX XXX XXXXX XX XXXXXXXX XXX XXXXXXXXX XXX XXXXXXXXXX XXXX XXXXX
XX X XXXX XXXX XXX XXXXXXXX XXXX XXXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXX XX
XXXXXXXXXX XXXX X XXXXXX X XXXXXXXXX XXXXXXXXXXXX XXXXXX XXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXX XXXXXXXXX XXXXXXX XX XXXXXXXXXX XXXXXXXX XX X XXXXXX XXXXX XXXXXXXXX
XXXXXXXXX XXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXX XXXXXXXXXXXXXXXXXXXXXX X XXXXXX XX XXX XXXXXXXXXXXXXXX XXXXXX XXXXXXX
XXXXXXXX XXXXXXX XX XXXXXXXXXXX XXXXXX XXXXX XXXXXX XX XXX XXXXX XXXXXX XXXXXXX
XXXXX XXXXXXXX XX XXX XXXXXXXXXX XXXXX XXX XXXXXXXXXX XXX XXXX XXX XXXX XXX XX
X XXXXXX XXXXX XXXXXXXX
XXXX XXXXXXX XXX XXXXX XXXXXX XXXXXXXXXXXX XX XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXX XXXXXXXX XX XXXXXX XX XXX XXXXXXXX XX XXXXXX XXX XXXXXXX XXXX XX
XXXXXXXXX XXX XXXXXXX XXX XXXXXXXXXX XXXXXXXXXX XX XXX XXXXX XXXXXX XX XX XXXXX
XXXXXX XXXX XXXXX XXX XXXXXXXX XXXXXXXXXX X XXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX XXXX XXXXXXX
XXXXXXX XXX XXXXXXXX XXXX XX XXXX XXXXX XXXXX XXX XXXXX XXXXXXXX XXXXX XX XXXX
XXXXXXXXXXX XX XXXX XXX XXXXXX XXXX XXXXX XX XXX XXXXX XXXXXX XXXX XXXXX XXXX
XXXX XX XXX XXXX XX XXXXX XXXXXX XXXXXX XX XXXX XXXX XXXXXXX XXXXXX XX X XXXXXX
XX XXXXXXX XXX XXXXXX XX XXXXXXXXX XXXX XXX XX XXXXXX XX X XXXXXXXXXXXX XXXXX
XXXX XXXXX X XXXXXXX XXXXX XX X XXXXX XXXX XXX XXXX XXXXXXXXXX XXXX XXX XXXXX
XX XXXXXXXXXX
| XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXX XXXXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXX XXXXX
XXXXXX XXXXXX XXXXX XXXXXXX XXXXXXXX XXXXXX XX XXXXXXX
XXXXXXXXXXXXX XXXXX XXXXXXXX XXXX XXXXXXXX XX XXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXX XXX XXXXXXXXX XXXXX XXXXXXXX XXXXXXXXXXXXXXX XXXX XXXXX
XXXXXXXX XXXX XXX XXXXXXXXX XXXXX XXXXX XXXXXXXXXXXX XXXXXXXX X XXXX XX X
XXXXXXXXX XXXXX XX XXXXXXXX XXXXX XXXXXXX XXXXX XXX XXXXXXXX XX XXXXXXXXXXX
XXXXX XX X XXXXXXXX XXXX XXXXXXXX XX XXX XXX XXXXXX XXXXXXXXXX
XX XXXXXX XXXXX XXX XXXXXXXXX XXX XXXXXXX XXXX XX XXX XXXXXX XXXX XXX XXXXXXX
XXXXX XXXXXXXXX XXX XXXXXX XXXXX XXXX XXX XXX XXXXXXX XXXXXXXXXXX XXXXXX XXXX
XXXXXXXX XXXXXXX XXXX XXXXXXXXX XXXX XXX XXXXX XXXXX XXXXXX XXX XXX XXXXXXXX XX
XXX XXXXXX XXX XXX XX XXX XXXXXXX
XXXX XXXXXX XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXX XXXXXXXX XXXXXXXX XXXX XXXXXX XXXXXXX XX XXX XXXXXXX XXXXXXXXXXXXX
XXX XXXXXXX XX XXXXXXXXX X XXXX XXXXXX XX XXXXXXXXXXXXX XXXXXX XXXXX XXXX X
XXXXXXXXX XXXXXXXX XX XXXXXXXXX XXXXX XXXXXXXXXX XXX XXXXXX XXX XXXX XXX XXXXXXX
X XXXX XXXXXXXXXXXXXX XXXXXXXX XXXX XXXXXX XXXXXXX XXXX XXXXX XXX XXX XXXXXX XX
XXXX XXXXX XXXXXX XXXXX X XXXX XXXX XXX XXXXXXX XX XXXXXX XXXXX XXX XXXXX XXXX
XXXXXXXXXXX XX XXX XXXXXX XXXXXXXX XXXXXXXXXX XXXXXXX XX XX XXXXXX XXXX XXXX
XXXXXX XX XXXXXXXXX XXXXXX XXXXXX XX XXXX XXXXXX XXXX X XXXX XXXXXXX XXXXXXXXXX
XXX XXX XXXX XXXXXXXX XXXXX XX XXX XXXX XX XXXXXXXXX XX XXXX X XXXX XXXXXXXXXXXX
XXXXX XXXXXXXX XXX XXXXX XX XXXXXXXX XXX XXXXXXXXX XXX XXXXXXXXXX XXXX XXXXX
XX X XXXX XXXX XXX XXXXXXXX XXXX XXXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXX XX
XXXXXXXXXX XXXX X XXXXXX X XXXXXXXXX XXXXXXXXXXXX XXXXXX XXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXX XXXXXXXXX XXXXXXX XX XXXXXXXXXX XXXXXXXX XX X XXXXXX XXXXX XXXXXXXXX
XXXXXXXXX XXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXX XXXXXXXXXXXXXXXXXXXXXX X XXXXXX XX XXX XXXXXXXXXXXXXXX XXXXXX XXXXXXX
XXXXXXXX XXXXXXX XX XXXXXXXXXXX XXXXXX XXXXX XXXXXX XX XXX XXXXX XXXXXX XXXXXXX
XXXXX XXXXXXXX XX XXX XXXXXXXXXX XXXXX XXX XXXXXXXXXX XXX XXXX XXX XXXX XXX XX
X XXXXXX XXXXX XXXXXXXX
XXXX XXXXXXX XXX XXXXX XXXXXX XXXXXXXXXXXX XX XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXX XXXXXXXX XX XXXXXX XX XXX XXXXXXXX XX XXXXXX XXX XXXXXXX XXXX XX
XXXXXXXXX XXX XXXXXXX XXX XXXXXXXXXX XXXXXXXXXX XX XXX XXXXX XXXXXX XX XX XXXXX
XXXXXX XXXX XXXXX XXX XXXXXXXX XXXXXXXXXX X XXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX XXXX XXXXXXX
XXXXXXX XXX XXXXXXXX XXXX XX XXXX XXXXX XXXXX XXX XXXXX XXXXXXXX XXXXX XX XXXX
XXXXXXXXXXX XX XXXX XXX XXXXXX XXXX XXXXX XX XXX XXXXX XXXXXX XXXX XXXXX XXXX
XXXX XX XXX XXXX XX XXXXX XXXXXX XXXXXX XX XXXX XXXX XXXXXXX XXXXXX XX X XXXXXX
XX XXXXXXX XXX XXXXXX XX XXXXXXXXX XXXX XXX XX XXXXXX XX X XXXXXXXXXXXX XXXXX
XXXX XXXXX X XXXXXXX XXXXX XX X XXXXX XXXX XXX XXXX XXXXXXXXXX XXXX XXX XXXXX
XX XXXXXXXXXX
| none | 1 | 1.376586 | 1 |
|
pyScript_NodeManager/NodeOutput.py | Shirazbello/Pyscriptining | 0 | 6630369 | <filename>pyScript_NodeManager/NodeOutput.py
from PySide2.QtWidgets import QWidget, QGridLayout, QPushButton, QComboBox, QLineEdit, QMessageBox
class NodeOutput(QWidget):
def __init__(self, content_widget):
super(NodeOutput, self).__init__()
self.content_widget = content_widget
# create UI
# create all layouts
self.grid_layout = QGridLayout(self)
# move buttons
self.up_button = QPushButton('', self) # QPushButton takes (text, parent); passing the parent first raises a TypeError
self.down_button = QPushButton('', self)
# type and label
self.type_combo_box = QComboBox(self)
self.type_combo_box.addItem('exec')
self.type_combo_box.addItem('data')
self.label_line_edit = QLineEdit(self)
self.label_line_edit.setPlaceholderText('Label')
# del button
self.del_button = QPushButton(self)
self.del_button.setText(' Del ')
self.del_button.clicked.connect(self.delete_clicked)
# merge layouts
self.grid_layout.addWidget(self.up_button, 0, 0)
self.grid_layout.addWidget(self.down_button, 1, 0)
self.grid_layout.addWidget(self.type_combo_box, 0, 1)
self.grid_layout.addWidget(self.label_line_edit, 1, 1)
self.grid_layout.addWidget(self.del_button, 0, 2, 2, 1)
def get_type(self):
return self.type_combo_box.currentText()
def get_label(self):
return self.label_line_edit.text()
def set_type(self, new_type):
self.type_combo_box.setCurrentText(new_type)
def set_label(self, new_label):
self.label_line_edit.setText(new_label)
def delete_clicked(self):
ret = QMessageBox.warning(self, 'Output', 'Do you really want to delete this output? All changes '
'will be lost.',
QMessageBox.Yes, QMessageBox.No)
if ret == QMessageBox.Yes:
self.content_widget.delete_output(self) | <filename>pyScript_NodeManager/NodeOutput.py
from PySide2.QtWidgets import QWidget, QGridLayout, QPushButton, QComboBox, QLineEdit, QMessageBox
class NodeOutput(QWidget):
def __init__(self, content_widget):
super(NodeOutput, self).__init__()
self.content_widget = content_widget
# create UI
# create all layouts
self.grid_layout = QGridLayout(self)
# move buttons
self.up_button = QPushButton('', self) # QPushButton takes (text, parent); passing the parent first raises a TypeError
self.down_button = QPushButton('', self)
# type and label
self.type_combo_box = QComboBox(self)
self.type_combo_box.addItem('exec')
self.type_combo_box.addItem('data')
self.label_line_edit = QLineEdit(self)
self.label_line_edit.setPlaceholderText('Label')
# del button
self.del_button = QPushButton(self)
self.del_button.setText(' Del ')
self.del_button.clicked.connect(self.delete_clicked)
# merge layouts
self.grid_layout.addWidget(self.up_button, 0, 0)
self.grid_layout.addWidget(self.down_button, 1, 0)
self.grid_layout.addWidget(self.type_combo_box, 0, 1)
self.grid_layout.addWidget(self.label_line_edit, 1, 1)
self.grid_layout.addWidget(self.del_button, 0, 2, 2, 1)
def get_type(self):
return self.type_combo_box.currentText()
def get_label(self):
return self.label_line_edit.text()
def set_type(self, new_type):
self.type_combo_box.setCurrentText(new_type)
def set_label(self, new_label):
self.label_line_edit.setText(new_label)
def delete_clicked(self):
ret = QMessageBox.warning(self, 'Output', 'Do you really want to delete this output? All changes '
'will be lost.',
QMessageBox.Yes, QMessageBox.No)
if ret == QMessageBox.Yes:
self.content_widget.delete_output(self) | en | 0.318761 | # create UI # create all layouts # move buttons # type and label # del button # merge layouts | 2.45793 | 2 |
vpv/utils/dummy_def_fields.py | Dorky-Lever/vpv | 2 | 6630370 | <gh_stars>1-10
import numpy as np
import math
import SimpleITK as sitk
test_vector_out = '/home/neil/share/deformations_test/test_vector_def.nrrd'
test_vector_out_mhd = '/home/neil/share/deformations_test/test_vector_def.mhd'
def rotate_vector(vector, theta):
theta = math.radians(theta)
x_temp = float(vector[0])
x = vector[0] * math.cos(theta) - vector[1] * math.sin(theta)
y = x_temp * math.sin(theta) + vector[1] * math.cos(theta)
return x, y
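# Example: rotate_vector((2, 2), 90) returns roughly (-2.0, 2.0), a 90-degree
# counter-clockwise rotation of the in-plane components (the z component is never touched below).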
a = np.zeros(100*100*100*3).reshape((100, 100, 100, 3))
# Fill the field so every vector is (2, 2, 2)
a.fill(2)
# For each slice along y, rotate the (x, y) vector components by a further 90 degrees (0, 90, 180, ...)
theta = -90
for y in range(a.shape[1]):
theta += 90
if theta > 360:
theta = 0
for z in range(a.shape[0]):
for x in range(a.shape[2]):
rotated = rotate_vector(a[z,y,x, [0,1]], theta)
a[z, y, x,[0,1]] = rotated
out = sitk.GetImageFromArray(a)
sitk.WriteImage(out, test_vector_out)
sitk.WriteImage(out, test_vector_out_mhd)
| import numpy as np
import math
import SimpleITK as sitk
test_vector_out = '/home/neil/share/deformations_test/test_vector_def.nrrd'
test_vector_out_mhd = '/home/neil/share/deformations_test/test_vector_def.mhd'
def rotate_vector(vector, theta):
theta = math.radians(theta)
x_temp = float(vector[0])
x = vector[0] * math.cos(theta) - vector[1] * math.sin(theta)
y = x_temp * math.sin(theta) + vector[1] * math.cos(theta)
return x, y
a = np.zeros(100*100*100*3).reshape((100, 100, 100, 3))
# Fill the field so every vector is (2, 2, 2)
a.fill(2)
# For each slice along y, rotate the (x, y) vector components by a further 90 degrees (0, 90, 180, ...)
theta = -90
for y in range(a.shape[1]):
theta += 90
if theta > 360:
theta = 0
for z in range(a.shape[0]):
for x in range(a.shape[2]):
rotated = rotate_vector(a[z,y,x, [0,1]], theta)
a[z, y, x,[0,1]] = rotated
out = sitk.GetImageFromArray(a)
sitk.WriteImage(out, test_vector_out)
sitk.WriteImage(out, test_vector_out_mhd) | en | 0.707365 | # Make a vector field of 2 # For each slice, rotate 45 degress | 3.048842 | 3 |
LinkedinController.py | DataScienceResearchPeru/linkedin-profile-scraper | 56 | 6630371 | import time
import ConfigParser
from SeleniumHelper import SeleniumHelper
from selenium import webdriver
class LinkedinController(SeleniumHelper):
# CONFIG
TIMEOUT = 7
data = {}
mode = 'PUBLIC'
SECTIONS = {}
FIELDS = {}
CONTAINER = {}
INITIAL_URL = 'https://www.linkedin.com'
LOGIN_USER_PATH = '#login-email'
LOGIN_PASS_PATH = <PASSWORD>'
LOGIN_USER_VALUE = ''
LOGIN_PASS_VALUE = ''
# PUBLIC
CONTAINER['PUBLIC'] = '#profile'
SECTIONS['PUBLIC'] = {}
FIELDS['PUBLIC'] = {}
SECTIONS['PUBLIC']['NAME'] = {'selector':'#name', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['IMAGE'] = {'selector':'.profile-picture img', 'type':'attr', 'attr':'src', 'quantity':'single'}
SECTIONS['PUBLIC']['CONNECTIONS'] = {'selector':'.member-connections', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['TITLE'] = {'selector':'p.title', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['LOCATION'] = {'selector':'.locality', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['INDUSTRY'] = {'selector':'#demographics dd.descriptor:nth-child(2)', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['RECOMMENDATIONS_NUMBER'] = {'selector':'.extra-info > tbody:nth-child(1) > tr:nth-child(4) > td:nth-child(2) > strong:nth-child(1)', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['SUMMARY'] = {'selector':'#summary .description', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['BRIEF_CURRENT'] = {'selector':'[data-section="currentPositionsDetails"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['BRIEF_CURRENT'] = {}
FIELDS['PUBLIC']['BRIEF_CURRENT']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['BRIEF_CURRENT']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['BRIEF_PREVIOUS'] = {'selector':'[data-section="pastPositionsDetails"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['BRIEF_PREVIOUS'] = {}
FIELDS['PUBLIC']['BRIEF_PREVIOUS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['BRIEF_PREVIOUS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['BRIEF_EDUCATION'] = {'selector':'[data-section="educationsDetails"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['BRIEF_EDUCATION'] = {}
FIELDS['PUBLIC']['BRIEF_EDUCATION']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['BRIEF_EDUCATION']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['WEBSITES'] = {'selector':'[data-section="websites"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['WEBSITES'] = {}
FIELDS['PUBLIC']['WEBSITES']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['WEBSITES']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['POSTS'] = {'selector':'.post', 'quantity':'multiple'}
FIELDS['PUBLIC']['POSTS'] = {}
FIELDS['PUBLIC']['POSTS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['POSTS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['POSTS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['PUBLIC']['POSTS']['DATE'] = {'selector':'.time', 'type':'text'}
SECTIONS['PUBLIC']['EXPERIENCE'] = {'selector':'.position', 'quantity':'multiple'}
FIELDS['PUBLIC']['EXPERIENCE'] = {}
FIELDS['PUBLIC']['EXPERIENCE']['TITLE'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['EXPERIENCE']['TITLE_URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['EXPERIENCE']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['PUBLIC']['EXPERIENCE']['COMPANY'] = {'selector':'.item-subtitle a', 'type':'text'}
FIELDS['PUBLIC']['EXPERIENCE']['COMPANY_URL'] = {'selector':'.item-subtitle a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['EXPERIENCE']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['EXPERIENCE']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_POSITION'] = {'selector':'#volunteering .position', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION'] = {}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['TITLE'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['COMPANY'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['CAUSE'] = {'selector':'.cause', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_OPPORTUNITIES'] = {'selector':'#volunteering div.opportunities.extra-section li', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_OPPORTUNITIES'] = {}
FIELDS['PUBLIC']['VOLUNTEER_OPPORTUNITIES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_CAUSES'] = {'selector':'#volunteering div.extra-section:nth-child(2) > ul:nth-child(2) > li', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_CAUSES'] = {}
FIELDS['PUBLIC']['VOLUNTEER_CAUSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_SUPPORT'] = {'selector':'#volunteering div.extra-section:nth-child(3) > ul:nth-child(2) > li', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_SUPPORT'] = {}
FIELDS['PUBLIC']['VOLUNTEER_SUPPORT']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['PUBLICATIONS'] = {'selector':'.publication', 'quantity':'multiple'}
FIELDS['PUBLIC']['PUBLICATIONS'] = {}
FIELDS['PUBLIC']['PUBLICATIONS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['PUBLICATIONS']['PLACE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['PUBLIC']['COURSES'] = {'selector':'.course', 'quantity':'multiple'}
FIELDS['PUBLIC']['COURSES'] = {}
FIELDS['PUBLIC']['COURSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['PROJECTS'] = {'selector':'.project', 'quantity':'multiple'}
FIELDS['PUBLIC']['PROJECTS'] = {}
FIELDS['PUBLIC']['PROJECTS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['PROJECTS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['PROJECTS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['PROJECTS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['PROJECTS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['PUBLIC']['AWARDS'] = {'selector':'.award', 'quantity':'multiple'}
FIELDS['PUBLIC']['AWARDS'] = {}
FIELDS['PUBLIC']['AWARDS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['AWARDS']['COMPANY'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['AWARDS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['AWARDS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['LANGUAGES'] = {'selector':'.language', 'quantity':'multiple'}
FIELDS['PUBLIC']['LANGUAGES'] = {}
FIELDS['PUBLIC']['LANGUAGES']['NAME'] = {'selector':'.name', 'type':'text'}
FIELDS['PUBLIC']['LANGUAGES']['LEVEL'] = {'selector':'.proficiency', 'type':'text'}
SECTIONS['PUBLIC']['SKILLS'] = {'selector':'.skill', 'quantity':'multiple'}
FIELDS['PUBLIC']['SKILLS'] = {}
FIELDS['PUBLIC']['SKILLS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['SKILLS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['EDUCATION'] = {'selector':'.school', 'quantity':'multiple'}
FIELDS['PUBLIC']['EDUCATION'] = {}
FIELDS['PUBLIC']['EDUCATION']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['EDUCATION']['DEGREE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['PUBLIC']['INTERESTS'] = {'selector':'.interest', 'quantity':'multiple'}
FIELDS['PUBLIC']['INTERESTS'] = {}
FIELDS['PUBLIC']['INTERESTS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['INTERESTS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['CERTIFICATIONS'] = {'selector':'.certification', 'quantity':'multiple'}
FIELDS['PUBLIC']['CERTIFICATIONS'] = {}
FIELDS['PUBLIC']['CERTIFICATIONS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['CERTIFICATIONS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['CERTIFICATIONS']['DEGREE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['CERTIFICATIONS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['CERTIFICATIONS']['IMG'] = {'selector':'.logo img', 'type':'attr', 'attr':'src'}
SECTIONS['PUBLIC']['ORGANIZATIONS'] = {'selector':'#organizations li', 'quantity':'multiple'}
FIELDS['PUBLIC']['ORGANIZATIONS'] = {}
FIELDS['PUBLIC']['ORGANIZATIONS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['ORGANIZATIONS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['ORGANIZATIONS']['DEGREE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['ORGANIZATIONS']['DATE'] = {'selector':'.date-range', 'type':'text'}
SECTIONS['PUBLIC']['PATENTS'] = {'selector':'.patent', 'quantity':'multiple'}
FIELDS['PUBLIC']['PATENTS'] = {}
FIELDS['PUBLIC']['PATENTS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['PATENTS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['PATENTS']['PLACE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['PATENTS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['PATENTS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['PUBLIC']['SCORES'] = {'selector':'.score', 'quantity':'multiple'}
FIELDS['PUBLIC']['SCORES'] = {}
FIELDS['PUBLIC']['SCORES']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['SCORES']['VALUE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['SCORES']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['SCORES']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['RECOMENDATIONS'] = {'selector':'.recommendation', 'quantity':'multiple'}
FIELDS['PUBLIC']['RECOMENDATIONS'] = {}
FIELDS['PUBLIC']['RECOMENDATIONS']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['GROUPS'] = {'selector':'.group', 'quantity':'multiple'}
FIELDS['PUBLIC']['GROUPS'] = {}
FIELDS['PUBLIC']['GROUPS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['GROUPS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['GROUPS']['IMG'] = {'selector':'.logo img', 'type':'attr', 'attr':'src'}
SECTIONS['PUBLIC']['RELATED'] = {'selector':'.profile-card', 'quantity':'multiple'}
FIELDS['PUBLIC']['RELATED'] = {}
FIELDS['PUBLIC']['RELATED']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['RELATED']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['RELATED']['VALUE'] = {'selector':'.headline', 'type':'text'}
FIELDS['PUBLIC']['RELATED']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
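# Shape of the mapping above: each SECTIONS[mode] entry names a CSS 'selector' plus a
# 'quantity'; 'single' entries also carry their own 'type' ('text', or 'attr' with an
# 'attr' name), while 'multiple' entries rely on the matching FIELDS[mode] dict to
# describe the sub-elements scraped from every matched node (presumably by the
# extractSection() call used in extractProfile below).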
# LOGGED
CONTAINER['LOGGED'] = '#profile'
SECTIONS['LOGGED'] = {}
FIELDS['LOGGED'] = {}
SECTIONS['LOGGED']['NAME'] = {'selector':'.full-name', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['IMAGE'] = {'selector':'.profile-picture img', 'type':'attr', 'attr':'src', 'quantity':'single'}
SECTIONS['LOGGED']['CONNECTIONS'] = {'selector':'.connections-link,.member-connections', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['TITLE'] = {'selector':'.title', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['LOCATION'] = {'selector':'#location .locality', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['LOCATION_URL'] = {'selector':'#location .locality a', 'type':'attr', 'attr':'href', 'quantity':'single'}
SECTIONS['LOGGED']['INDUSTRY'] = {'selector':'.industry', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['INDUSTRY_URL'] = {'selector':'.industry a', 'type':'attr', 'attr':'href', 'quantity':'single'}
SECTIONS['LOGGED']['RECOMMENDATIONS_NUMBER'] = {'selector':'.nav-received-tab.all-received', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['SUMMARY'] = {'selector':'.summary', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['CONTACTS_SHARED'] = {'selector':'.shared', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['CONTACTS_NEW'] = {'selector':'.new', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['FRIENDLY_URL'] = {'selector':'.view-public-profile', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['FOLLOWERS'] = {'selector':'.follow-widget-count', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['BRIEF_CURRENT'] = {'selector':'#overview-summary-current li', 'quantity':'multiple'}
FIELDS['LOGGED']['BRIEF_CURRENT'] = {}
FIELDS['LOGGED']['BRIEF_CURRENT']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['BRIEF_CURRENT']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['BRIEF_PREVIOUS'] = {'selector':'#overview-summary-past li', 'quantity':'multiple'}
FIELDS['LOGGED']['BRIEF_PREVIOUS'] = {}
FIELDS['LOGGED']['BRIEF_PREVIOUS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['BRIEF_PREVIOUS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['BRIEF_EDUCATION'] = {'selector':'#overview-summary-education li', 'quantity':'multiple'}
FIELDS['LOGGED']['BRIEF_EDUCATION'] = {}
FIELDS['LOGGED']['BRIEF_EDUCATION']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['BRIEF_EDUCATION']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['EMAILS'] = {'selector':'#email-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['EMAILS'] = {}
FIELDS['LOGGED']['EMAILS']['EMAIL'] = {'selector':'', 'type':'text'}
FIELDS['LOGGED']['EMAILS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['IMS'] = {'selector':'#im-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['IMS'] = {}
FIELDS['LOGGED']['IMS']['IM'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['PHONES'] = {'selector':'#phone-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['PHONES'] = {}
FIELDS['LOGGED']['PHONES']['PHONE'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['WEBSITES'] = {'selector':'#website-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['WEBSITES'] = {}
FIELDS['LOGGED']['WEBSITES']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['WEBSITES']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['TWITTER'] = {'selector':'#twitter-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['TWITTER'] = {}
FIELDS['LOGGED']['TWITTER']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['TWITTER']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['ATTACHMENTS'] = {'selector':'#summary-item .media-cell', 'quantity':'multiple'}
FIELDS['LOGGED']['ATTACHMENTS'] = {}
FIELDS['LOGGED']['ATTACHMENTS']['NAME'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['ATTACHMENTS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['POSTS'] = {'selector':'.influencer-posts-list > li', 'quantity':'multiple'}
FIELDS['LOGGED']['POSTS'] = {}
FIELDS['LOGGED']['POSTS']['NAME'] = {'selector':'.influencer-post-title', 'type':'text'}
FIELDS['LOGGED']['POSTS']['URL'] = {'selector':'.influencer-post-title a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['POSTS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['POSTS']['DATE'] = {'selector':'.influencer-post-published', 'type':'text'}
SECTIONS['LOGGED']['EXPERIENCE'] = {'selector':'.current-position,past-position', 'quantity':'multiple'}
FIELDS['LOGGED']['EXPERIENCE'] = {}
FIELDS['LOGGED']['EXPERIENCE']['TITLE'] = {'selector':'div > header > h4 > a', 'type':'text'}
FIELDS['LOGGED']['EXPERIENCE']['TITLE_URL'] = {'selector':'div > header > h4 > a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['EXPERIENCE']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['EXPERIENCE']['COMPANY'] = {'selector':'div > header > h5:nth-child(3) > span > strong > a', 'type':'text'}
FIELDS['LOGGED']['EXPERIENCE']['COMPANY_URL'] = {'selector':'div > header > h5:nth-child(3) > span > strong > a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['EXPERIENCE']['DATE'] = {'selector':'.experience-date-locale', 'type':'text'}
FIELDS['LOGGED']['EXPERIENCE']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_POSITION'] = {'selector':'#background-volunteering > div', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_POSITION'] = {}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['TITLE'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['COMPANY'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['DATE'] = {'selector':'.volunteering-date-cause time', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['CAUSE'] = {'selector':'.locality', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_OPPORTUNITIES'] = {'selector':'.volunteering-opportunities > li', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_OPPORTUNITIES'] = {}
FIELDS['LOGGED']['VOLUNTEER_OPPORTUNITIES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_CAUSES'] = {'selector':'.interests .volunteering-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_CAUSES'] = {}
FIELDS['LOGGED']['VOLUNTEER_CAUSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_SUPPORT'] = {'selector':'.non-profits .volunteering-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_SUPPORT'] = {}
FIELDS['LOGGED']['VOLUNTEER_SUPPORT']['NAME'] = {'selector':'', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_SUPPORT']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['PUBLICATIONS'] = {'selector':'#background-publications > div', 'quantity':'multiple'}
FIELDS['LOGGED']['PUBLICATIONS'] = {}
FIELDS['LOGGED']['PUBLICATIONS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['PUBLICATIONS']['PLACE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['DATE'] = {'selector':'.publication-date', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['LOGGED']['COURSES'] = {'selector':'.courses-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['COURSES'] = {}
FIELDS['LOGGED']['COURSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['PROJECTS'] = {'selector':'#background-projects > div', 'quantity':'multiple'}
FIELDS['LOGGED']['PROJECTS'] = {}
FIELDS['LOGGED']['PROJECTS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['PROJECTS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['PROJECTS']['DATE'] = {'selector':'.projects-date', 'type':'text'}
FIELDS['LOGGED']['PROJECTS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['PROJECTS']['CONTRIBUTORS'] = {'selector':'.associated-list', 'type':'text'}
SECTIONS['LOGGED']['AWARDS'] = {'selector':'#background-honors > div', 'quantity':'multiple'}
FIELDS['LOGGED']['AWARDS'] = {}
FIELDS['LOGGED']['AWARDS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['AWARDS']['COMPANY'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['AWARDS']['DATE'] = {'selector':'.honors-date', 'type':'text'}
FIELDS['LOGGED']['AWARDS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['LOGGED']['LANGUAGES'] = {'selector':'#languages-view > ol > li', 'quantity':'multiple'}
FIELDS['LOGGED']['LANGUAGES'] = {}
FIELDS['LOGGED']['LANGUAGES']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['LANGUAGES']['LEVEL'] = {'selector':'.languages-proficiency', 'type':'text'}
SECTIONS['LOGGED']['SKILLS'] = {'selector':'.skills-section > li', 'quantity':'multiple'}
FIELDS['LOGGED']['SKILLS'] = {}
FIELDS['LOGGED']['SKILLS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['SKILLS']['ENDORSEMENTS'] = {'selector':'.num-endorsements', 'type':'text'}
FIELDS['LOGGED']['SKILLS']['NUMBER'] = {'selector':'.endorse-item-name-text', 'type':'text'}
FIELDS['LOGGED']['SKILLS']['URL'] = {'selector':'.endorse-item-name-text', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['EDUCATION'] = {'selector':'#background-education > div', 'quantity':'multiple'}
FIELDS['LOGGED']['EDUCATION'] = {}
FIELDS['LOGGED']['EDUCATION']['NAME'] = {'selector':'div > div > header > h4 > a', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['URL'] = {'selector':'div > div > header > h4 > a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['EDUCATION']['DEGREE'] = {'selector':'.degree', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['MAJOR'] = {'selector':'.major', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['GRADE'] = {'selector':'.grade', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['DATE'] = {'selector':'.education-date', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['NOTES'] = {'selector':'.notes', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['IMG'] = {'selector':'.education-logo img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['INTERESTS'] = {'selector':'.interests-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['INTERESTS'] = {}
FIELDS['LOGGED']['INTERESTS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['INTERESTS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['CERTIFICATIONS'] = {'selector':'#background-certifications div', 'quantity':'multiple'}
FIELDS['LOGGED']['CERTIFICATIONS'] = {}
FIELDS['LOGGED']['CERTIFICATIONS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['CERTIFICATIONS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['CERTIFICATIONS']['DEGREE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['CERTIFICATIONS']['DATE'] = {'selector':'.certification-date', 'type':'text'}
FIELDS['LOGGED']['CERTIFICATIONS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['ORGANIZATIONS'] = {'selector':'#background-organizations div', 'quantity':'multiple'}
FIELDS['LOGGED']['ORGANIZATIONS'] = {}
FIELDS['LOGGED']['ORGANIZATIONS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['ORGANIZATIONS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['ORGANIZATIONS']['DEGREE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['ORGANIZATIONS']['DATE'] = {'selector':'.organizations-date', 'type':'text'}
SECTIONS['LOGGED']['PATENTS'] = {'selector':'#background-patents > div', 'quantity':'multiple'}
FIELDS['LOGGED']['PATENTS'] = {}
FIELDS['LOGGED']['PATENTS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['PATENTS']['PLACE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['PATENTS']['DATE'] = {'selector':'.patents-date', 'type':'text'}
SECTIONS['LOGGED']['SCORES'] = {'selector':'#background-test-scores > div', 'quantity':'multiple'}
FIELDS['LOGGED']['SCORES'] = {}
FIELDS['LOGGED']['SCORES']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['SCORES']['VALUE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['SCORES']['DATE'] = {'selector':'.test-scores-date', 'type':'text'}
SECTIONS['LOGGED']['RECOMENDATIONS_RECEIVED'] = {'selector':'.endorsements-received li', 'quantity':'multiple'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED'] = {}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['POSITION'] = {'selector':'h3', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['PLACE'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['PERSON'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['PROFILE'] = {'selector':'h5 a', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['JOB'] = {'selector':'h6', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['DATE'] = {'selector':'.endorsement-date', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['IMAGE'] = {'selector':'.endorsement-picture img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['URL'] = {'selector':'.endorsement-picture a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['RECOMENDATIONS_GIVEN'] = {'selector':'.endorsements-given li', 'quantity':'multiple'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN'] = {}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['NAME'] = {'selector':'', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['POSITION'] = {'selector':'h3', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['PLACE'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['PERSON'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['PROFILE'] = {'selector':'h5 a', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['JOB'] = {'selector':'h6', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['DATE'] = {'selector':'.endorsement-date', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['IMAGE'] = {'selector':'.endorsement-picture img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['URL'] = {'selector':'.endorsement-picture a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['GROUPS'] = {'selector':'.groups-container li', 'quantity':'multiple'}
FIELDS['LOGGED']['GROUPS'] = {}
FIELDS['LOGGED']['GROUPS']['NAME'] = {'selector':'.group-link', 'type':'text'}
FIELDS['LOGGED']['GROUPS']['URL'] = {'selector':'.group-link', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['GROUPS']['MEMBERS'] = {'selector':'.groups-stats', 'type':'text'}
FIELDS['LOGGED']['GROUPS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['RELATED'] = {'selector':'.discovery-results li', 'quantity':'multiple'}
FIELDS['LOGGED']['RELATED'] = {}
FIELDS['LOGGED']['RELATED']['NAME'] = {'selector':'img', 'type':'attr', 'attr':'alt'}
FIELDS['LOGGED']['RELATED']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['RELATED']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['RELATED']['LEVEL'] = {'selector':'.degree-icon', 'type':'text'}
SECTIONS['LOGGED']['SIMILAR'] = {'selector':'.browse-map-list li', 'quantity':'multiple'}
FIELDS['LOGGED']['SIMILAR'] = {}
FIELDS['LOGGED']['SIMILAR']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['SIMILAR']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['SIMILAR']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['SIMILAR']['TITLE'] = {'selector':'.browse-map-title', 'type':'text'}
SECTIONS['LOGGED']['FOLLOWING'] = {'selector':'.following-container > li', 'quantity':'multiple'}
FIELDS['LOGGED']['FOLLOWING'] = {}
    FIELDS['LOGGED']['FOLLOWING']['NAME'] = {'selector':'.channel-name a,.following-name a', 'type':'text'}
    FIELDS['LOGGED']['FOLLOWING']['URL'] = {'selector':'.channel-name a,.following-name a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['FOLLOWING']['DETAILS'] = {'selector':'.following-stats,.following-name', 'type':'text'}
FIELDS['LOGGED']['FOLLOWING']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['PERSONAL'] = {'selector':'#personal-info-view', 'quantity':'multiple'}
FIELDS['LOGGED']['PERSONAL'] = {}
FIELDS['LOGGED']['PERSONAL']['EXTRA'] = {'selector':'', 'type':'text'}
# CODE
def login(self, filename):
config = ConfigParser.ConfigParser()
config.read(filename)
self.LOGIN_USER_VALUE = config.get('credentials', 'login_user_value')
self.LOGIN_PASS_VALUE = config.get('credentials', 'login_pass_value')
self.loadPage(self.INITIAL_URL)
self.waitAndWrite(self.LOGIN_USER_PATH, self.LOGIN_USER_VALUE)
self.submitForm(self.selectAndWrite(self.LOGIN_PASS_PATH, self.LOGIN_PASS_VALUE))
def performClicks(self):
self.clickSelector('#contact-info-tab')
self.clickMultiple('.hidden-view-more')
self.clickMultiple('.toggle-show-more')
self.clickMultiple('.see-action')
self.clickMultiple('.see-more-less')
if self.mode == 'LOGGED':
self.clickMultiple('.see-more')
else:
self.clickMultiple('li.see-more label')
self.clickMultiple('.recommendation label')
time.sleep(0.3)
def get_conn_id(self):
connId = ''
html = self.driver.page_source
arr1 = html.split('connId=')
if len(arr1) > 1:
arr2 = arr1[1].split('&')
connId = arr2[0]
return connId
def extractProfile(self, url):
self.loadAndWait(url, self.CONTAINER[self.mode])
self.performClicks()
self.data = self.extractSection(self.mode)
self.data['friendlyUrl'] = self.driver.current_url
self.data['connId'] = self.get_conn_id()
return self.data
def __init__(self, config=None, debug=False):
if debug:
self.driver = webdriver.Firefox()
else:
self.driver = webdriver.PhantomJS()
self.driver.set_page_load_timeout(self.TIMEOUT)
if config:
self.mode = 'LOGGED'
self.login(config) | import time
import ConfigParser
from SeleniumHelper import SeleniumHelper
from selenium import webdriver
class LinkedinController(SeleniumHelper):
# CONFIG
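    # The scraper is configured declaratively: CONTAINER maps each mode ('PUBLIC' / 'LOGGED')
    # to the selector that must be present before extraction starts, SECTIONS maps section
    # names to a CSS selector plus an expected quantity ('single' / 'multiple'), and FIELDS
    # maps every 'multiple' section to per-item sub-selectors read either as text or as an
    # attribute. These tables are presumably consumed by extractSection, inherited from
    # SeleniumHelper.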
TIMEOUT = 7
data = {}
mode = 'PUBLIC'
SECTIONS = {}
FIELDS = {}
CONTAINER = {}
INITIAL_URL = 'https://www.linkedin.com'
LOGIN_USER_PATH = '#login-email'
LOGIN_PASS_PATH = <PASSWORD>'
LOGIN_USER_VALUE = ''
LOGIN_PASS_VALUE = ''
# PUBLIC
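    # Selectors for the logged-out (public) profile layout.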
CONTAINER['PUBLIC'] = '#profile'
SECTIONS['PUBLIC'] = {}
FIELDS['PUBLIC'] = {}
SECTIONS['PUBLIC']['NAME'] = {'selector':'#name', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['IMAGE'] = {'selector':'.profile-picture img', 'type':'attr', 'attr':'src', 'quantity':'single'}
SECTIONS['PUBLIC']['CONNECTIONS'] = {'selector':'.member-connections', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['TITLE'] = {'selector':'p.title', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['LOCATION'] = {'selector':'.locality', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['INDUSTRY'] = {'selector':'#demographics dd.descriptor:nth-child(2)', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['RECOMMENDATIONS_NUMBER'] = {'selector':'.extra-info > tbody:nth-child(1) > tr:nth-child(4) > td:nth-child(2) > strong:nth-child(1)', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['SUMMARY'] = {'selector':'#summary .description', 'type':'text', 'quantity':'single'}
SECTIONS['PUBLIC']['BRIEF_CURRENT'] = {'selector':'[data-section="currentPositionsDetails"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['BRIEF_CURRENT'] = {}
FIELDS['PUBLIC']['BRIEF_CURRENT']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['BRIEF_CURRENT']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['BRIEF_PREVIOUS'] = {'selector':'[data-section="pastPositionsDetails"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['BRIEF_PREVIOUS'] = {}
FIELDS['PUBLIC']['BRIEF_PREVIOUS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['BRIEF_PREVIOUS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['BRIEF_EDUCATION'] = {'selector':'[data-section="educationsDetails"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['BRIEF_EDUCATION'] = {}
FIELDS['PUBLIC']['BRIEF_EDUCATION']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['BRIEF_EDUCATION']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['WEBSITES'] = {'selector':'[data-section="websites"] li', 'quantity':'multiple'}
FIELDS['PUBLIC']['WEBSITES'] = {}
FIELDS['PUBLIC']['WEBSITES']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['WEBSITES']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['POSTS'] = {'selector':'.post', 'quantity':'multiple'}
FIELDS['PUBLIC']['POSTS'] = {}
FIELDS['PUBLIC']['POSTS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['POSTS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['POSTS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['PUBLIC']['POSTS']['DATE'] = {'selector':'.time', 'type':'text'}
SECTIONS['PUBLIC']['EXPERIENCE'] = {'selector':'.position', 'quantity':'multiple'}
FIELDS['PUBLIC']['EXPERIENCE'] = {}
FIELDS['PUBLIC']['EXPERIENCE']['TITLE'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['EXPERIENCE']['TITLE_URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['EXPERIENCE']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['PUBLIC']['EXPERIENCE']['COMPANY'] = {'selector':'.item-subtitle a', 'type':'text'}
FIELDS['PUBLIC']['EXPERIENCE']['COMPANY_URL'] = {'selector':'.item-subtitle a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['EXPERIENCE']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['EXPERIENCE']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_POSITION'] = {'selector':'#volunteering .position', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION'] = {}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['TITLE'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['COMPANY'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['CAUSE'] = {'selector':'.cause', 'type':'text'}
FIELDS['PUBLIC']['VOLUNTEER_POSITION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_OPPORTUNITIES'] = {'selector':'#volunteering div.opportunities.extra-section li', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_OPPORTUNITIES'] = {}
FIELDS['PUBLIC']['VOLUNTEER_OPPORTUNITIES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_CAUSES'] = {'selector':'#volunteering div.extra-section:nth-child(2) > ul:nth-child(2) > li', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_CAUSES'] = {}
FIELDS['PUBLIC']['VOLUNTEER_CAUSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['VOLUNTEER_SUPPORT'] = {'selector':'#volunteering div.extra-section:nth-child(3) > ul:nth-child(2) > li', 'quantity':'multiple'}
FIELDS['PUBLIC']['VOLUNTEER_SUPPORT'] = {}
FIELDS['PUBLIC']['VOLUNTEER_SUPPORT']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['PUBLICATIONS'] = {'selector':'.publication', 'quantity':'multiple'}
FIELDS['PUBLIC']['PUBLICATIONS'] = {}
FIELDS['PUBLIC']['PUBLICATIONS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['PUBLICATIONS']['PLACE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['PUBLICATIONS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['PUBLIC']['COURSES'] = {'selector':'.course', 'quantity':'multiple'}
FIELDS['PUBLIC']['COURSES'] = {}
FIELDS['PUBLIC']['COURSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['PROJECTS'] = {'selector':'.project', 'quantity':'multiple'}
FIELDS['PUBLIC']['PROJECTS'] = {}
FIELDS['PUBLIC']['PROJECTS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['PROJECTS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['PROJECTS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['PROJECTS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['PROJECTS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['PUBLIC']['AWARDS'] = {'selector':'.award', 'quantity':'multiple'}
FIELDS['PUBLIC']['AWARDS'] = {}
FIELDS['PUBLIC']['AWARDS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['AWARDS']['COMPANY'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['AWARDS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['AWARDS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['LANGUAGES'] = {'selector':'.language', 'quantity':'multiple'}
FIELDS['PUBLIC']['LANGUAGES'] = {}
FIELDS['PUBLIC']['LANGUAGES']['NAME'] = {'selector':'.name', 'type':'text'}
FIELDS['PUBLIC']['LANGUAGES']['LEVEL'] = {'selector':'.proficiency', 'type':'text'}
SECTIONS['PUBLIC']['SKILLS'] = {'selector':'.skill', 'quantity':'multiple'}
FIELDS['PUBLIC']['SKILLS'] = {}
FIELDS['PUBLIC']['SKILLS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['SKILLS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['EDUCATION'] = {'selector':'.school', 'quantity':'multiple'}
FIELDS['PUBLIC']['EDUCATION'] = {}
FIELDS['PUBLIC']['EDUCATION']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['EDUCATION']['DEGREE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['EDUCATION']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['PUBLIC']['INTERESTS'] = {'selector':'.interest', 'quantity':'multiple'}
FIELDS['PUBLIC']['INTERESTS'] = {}
FIELDS['PUBLIC']['INTERESTS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['PUBLIC']['INTERESTS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['PUBLIC']['CERTIFICATIONS'] = {'selector':'.certification', 'quantity':'multiple'}
FIELDS['PUBLIC']['CERTIFICATIONS'] = {}
FIELDS['PUBLIC']['CERTIFICATIONS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['CERTIFICATIONS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['CERTIFICATIONS']['DEGREE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['CERTIFICATIONS']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['CERTIFICATIONS']['IMG'] = {'selector':'.logo img', 'type':'attr', 'attr':'src'}
SECTIONS['PUBLIC']['ORGANIZATIONS'] = {'selector':'#organizations li', 'quantity':'multiple'}
FIELDS['PUBLIC']['ORGANIZATIONS'] = {}
FIELDS['PUBLIC']['ORGANIZATIONS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['ORGANIZATIONS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['ORGANIZATIONS']['DEGREE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['ORGANIZATIONS']['DATE'] = {'selector':'.date-range', 'type':'text'}
SECTIONS['PUBLIC']['PATENTS'] = {'selector':'.patent', 'quantity':'multiple'}
FIELDS['PUBLIC']['PATENTS'] = {}
FIELDS['PUBLIC']['PATENTS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['PATENTS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['PATENTS']['PLACE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['PATENTS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['PUBLIC']['PATENTS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['PUBLIC']['SCORES'] = {'selector':'.score', 'quantity':'multiple'}
FIELDS['PUBLIC']['SCORES'] = {}
FIELDS['PUBLIC']['SCORES']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['SCORES']['VALUE'] = {'selector':'.item-subtitle', 'type':'text'}
FIELDS['PUBLIC']['SCORES']['DATE'] = {'selector':'.date-range', 'type':'text'}
FIELDS['PUBLIC']['SCORES']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['PUBLIC']['RECOMENDATIONS'] = {'selector':'.recommendation', 'quantity':'multiple'}
FIELDS['PUBLIC']['RECOMENDATIONS'] = {}
FIELDS['PUBLIC']['RECOMENDATIONS']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['PUBLIC']['GROUPS'] = {'selector':'.group', 'quantity':'multiple'}
FIELDS['PUBLIC']['GROUPS'] = {}
FIELDS['PUBLIC']['GROUPS']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['GROUPS']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['GROUPS']['IMG'] = {'selector':'.logo img', 'type':'attr', 'attr':'src'}
SECTIONS['PUBLIC']['RELATED'] = {'selector':'.profile-card', 'quantity':'multiple'}
FIELDS['PUBLIC']['RELATED'] = {}
FIELDS['PUBLIC']['RELATED']['NAME'] = {'selector':'.item-title', 'type':'text'}
FIELDS['PUBLIC']['RELATED']['URL'] = {'selector':'.item-title a', 'type':'attr', 'attr':'href'}
FIELDS['PUBLIC']['RELATED']['VALUE'] = {'selector':'.headline', 'type':'text'}
FIELDS['PUBLIC']['RELATED']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
# LOGGED
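    # Selectors for the logged-in profile layout, which exposes richer data
    # (contact details, endorsements, groups, followed channels, etc.).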
CONTAINER['LOGGED'] = '#profile'
SECTIONS['LOGGED'] = {}
FIELDS['LOGGED'] = {}
SECTIONS['LOGGED']['NAME'] = {'selector':'.full-name', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['IMAGE'] = {'selector':'.profile-picture img', 'type':'attr', 'attr':'src', 'quantity':'single'}
SECTIONS['LOGGED']['CONNECTIONS'] = {'selector':'.connections-link,.member-connections', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['TITLE'] = {'selector':'.title', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['LOCATION'] = {'selector':'#location .locality', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['LOCATION_URL'] = {'selector':'#location .locality a', 'type':'attr', 'attr':'href', 'quantity':'single'}
SECTIONS['LOGGED']['INDUSTRY'] = {'selector':'.industry', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['INDUSTRY_URL'] = {'selector':'.industry a', 'type':'attr', 'attr':'href', 'quantity':'single'}
SECTIONS['LOGGED']['RECOMMENDATIONS_NUMBER'] = {'selector':'.nav-received-tab.all-received', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['SUMMARY'] = {'selector':'.summary', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['CONTACTS_SHARED'] = {'selector':'.shared', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['CONTACTS_NEW'] = {'selector':'.new', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['FRIENDLY_URL'] = {'selector':'.view-public-profile', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['FOLLOWERS'] = {'selector':'.follow-widget-count', 'type':'text', 'quantity':'single'}
SECTIONS['LOGGED']['BRIEF_CURRENT'] = {'selector':'#overview-summary-current li', 'quantity':'multiple'}
FIELDS['LOGGED']['BRIEF_CURRENT'] = {}
FIELDS['LOGGED']['BRIEF_CURRENT']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['BRIEF_CURRENT']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['BRIEF_PREVIOUS'] = {'selector':'#overview-summary-past li', 'quantity':'multiple'}
FIELDS['LOGGED']['BRIEF_PREVIOUS'] = {}
FIELDS['LOGGED']['BRIEF_PREVIOUS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['BRIEF_PREVIOUS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['BRIEF_EDUCATION'] = {'selector':'#overview-summary-education li', 'quantity':'multiple'}
FIELDS['LOGGED']['BRIEF_EDUCATION'] = {}
FIELDS['LOGGED']['BRIEF_EDUCATION']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['BRIEF_EDUCATION']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['EMAILS'] = {'selector':'#email-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['EMAILS'] = {}
FIELDS['LOGGED']['EMAILS']['EMAIL'] = {'selector':'', 'type':'text'}
FIELDS['LOGGED']['EMAILS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['IMS'] = {'selector':'#im-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['IMS'] = {}
FIELDS['LOGGED']['IMS']['IM'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['PHONES'] = {'selector':'#phone-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['PHONES'] = {}
FIELDS['LOGGED']['PHONES']['PHONE'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['WEBSITES'] = {'selector':'#website-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['WEBSITES'] = {}
FIELDS['LOGGED']['WEBSITES']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['WEBSITES']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['TWITTER'] = {'selector':'#twitter-view li', 'quantity':'multiple'}
FIELDS['LOGGED']['TWITTER'] = {}
FIELDS['LOGGED']['TWITTER']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['TWITTER']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['ATTACHMENTS'] = {'selector':'#summary-item .media-cell', 'quantity':'multiple'}
FIELDS['LOGGED']['ATTACHMENTS'] = {}
FIELDS['LOGGED']['ATTACHMENTS']['NAME'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['ATTACHMENTS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['POSTS'] = {'selector':'.influencer-posts-list > li', 'quantity':'multiple'}
FIELDS['LOGGED']['POSTS'] = {}
FIELDS['LOGGED']['POSTS']['NAME'] = {'selector':'.influencer-post-title', 'type':'text'}
FIELDS['LOGGED']['POSTS']['URL'] = {'selector':'.influencer-post-title a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['POSTS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['POSTS']['DATE'] = {'selector':'.influencer-post-published', 'type':'text'}
    SECTIONS['LOGGED']['EXPERIENCE'] = {'selector':'.current-position,.past-position', 'quantity':'multiple'}
FIELDS['LOGGED']['EXPERIENCE'] = {}
FIELDS['LOGGED']['EXPERIENCE']['TITLE'] = {'selector':'div > header > h4 > a', 'type':'text'}
FIELDS['LOGGED']['EXPERIENCE']['TITLE_URL'] = {'selector':'div > header > h4 > a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['EXPERIENCE']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['EXPERIENCE']['COMPANY'] = {'selector':'div > header > h5:nth-child(3) > span > strong > a', 'type':'text'}
FIELDS['LOGGED']['EXPERIENCE']['COMPANY_URL'] = {'selector':'div > header > h5:nth-child(3) > span > strong > a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['EXPERIENCE']['DATE'] = {'selector':'.experience-date-locale', 'type':'text'}
FIELDS['LOGGED']['EXPERIENCE']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_POSITION'] = {'selector':'#background-volunteering > div', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_POSITION'] = {}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['TITLE'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['COMPANY'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['DATE'] = {'selector':'.volunteering-date-cause time', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['CAUSE'] = {'selector':'.locality', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_POSITION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_OPPORTUNITIES'] = {'selector':'.volunteering-opportunities > li', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_OPPORTUNITIES'] = {}
FIELDS['LOGGED']['VOLUNTEER_OPPORTUNITIES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_CAUSES'] = {'selector':'.interests .volunteering-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_CAUSES'] = {}
FIELDS['LOGGED']['VOLUNTEER_CAUSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['VOLUNTEER_SUPPORT'] = {'selector':'.non-profits .volunteering-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['VOLUNTEER_SUPPORT'] = {}
FIELDS['LOGGED']['VOLUNTEER_SUPPORT']['NAME'] = {'selector':'', 'type':'text'}
FIELDS['LOGGED']['VOLUNTEER_SUPPORT']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['PUBLICATIONS'] = {'selector':'#background-publications > div', 'quantity':'multiple'}
FIELDS['LOGGED']['PUBLICATIONS'] = {}
FIELDS['LOGGED']['PUBLICATIONS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['PUBLICATIONS']['PLACE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['DATE'] = {'selector':'.publication-date', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['PUBLICATIONS']['CONTRIBUTORS'] = {'selector':'.contributors', 'type':'text'}
SECTIONS['LOGGED']['COURSES'] = {'selector':'.courses-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['COURSES'] = {}
FIELDS['LOGGED']['COURSES']['NAME'] = {'selector':'', 'type':'text'}
SECTIONS['LOGGED']['PROJECTS'] = {'selector':'#background-projects > div', 'quantity':'multiple'}
FIELDS['LOGGED']['PROJECTS'] = {}
FIELDS['LOGGED']['PROJECTS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['PROJECTS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['PROJECTS']['DATE'] = {'selector':'.projects-date', 'type':'text'}
FIELDS['LOGGED']['PROJECTS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['PROJECTS']['CONTRIBUTORS'] = {'selector':'.associated-list', 'type':'text'}
SECTIONS['LOGGED']['AWARDS'] = {'selector':'#background-honors > div', 'quantity':'multiple'}
FIELDS['LOGGED']['AWARDS'] = {}
FIELDS['LOGGED']['AWARDS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['AWARDS']['COMPANY'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['AWARDS']['DATE'] = {'selector':'.honors-date', 'type':'text'}
FIELDS['LOGGED']['AWARDS']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
SECTIONS['LOGGED']['LANGUAGES'] = {'selector':'#languages-view > ol > li', 'quantity':'multiple'}
FIELDS['LOGGED']['LANGUAGES'] = {}
FIELDS['LOGGED']['LANGUAGES']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['LANGUAGES']['LEVEL'] = {'selector':'.languages-proficiency', 'type':'text'}
SECTIONS['LOGGED']['SKILLS'] = {'selector':'.skills-section > li', 'quantity':'multiple'}
FIELDS['LOGGED']['SKILLS'] = {}
FIELDS['LOGGED']['SKILLS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['SKILLS']['ENDORSEMENTS'] = {'selector':'.num-endorsements', 'type':'text'}
FIELDS['LOGGED']['SKILLS']['NUMBER'] = {'selector':'.endorse-item-name-text', 'type':'text'}
FIELDS['LOGGED']['SKILLS']['URL'] = {'selector':'.endorse-item-name-text', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['EDUCATION'] = {'selector':'#background-education > div', 'quantity':'multiple'}
FIELDS['LOGGED']['EDUCATION'] = {}
FIELDS['LOGGED']['EDUCATION']['NAME'] = {'selector':'div > div > header > h4 > a', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['URL'] = {'selector':'div > div > header > h4 > a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['EDUCATION']['DEGREE'] = {'selector':'.degree', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['MAJOR'] = {'selector':'.major', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['GRADE'] = {'selector':'.grade', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['DATE'] = {'selector':'.education-date', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['NOTES'] = {'selector':'.notes', 'type':'text'}
FIELDS['LOGGED']['EDUCATION']['IMG'] = {'selector':'.education-logo img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['INTERESTS'] = {'selector':'.interests-listing > li', 'quantity':'multiple'}
FIELDS['LOGGED']['INTERESTS'] = {}
FIELDS['LOGGED']['INTERESTS']['NAME'] = {'selector':'a', 'type':'text'}
FIELDS['LOGGED']['INTERESTS']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['CERTIFICATIONS'] = {'selector':'#background-certifications div', 'quantity':'multiple'}
FIELDS['LOGGED']['CERTIFICATIONS'] = {}
FIELDS['LOGGED']['CERTIFICATIONS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['CERTIFICATIONS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['CERTIFICATIONS']['DEGREE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['CERTIFICATIONS']['DATE'] = {'selector':'.certification-date', 'type':'text'}
FIELDS['LOGGED']['CERTIFICATIONS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['ORGANIZATIONS'] = {'selector':'#background-organizations div', 'quantity':'multiple'}
FIELDS['LOGGED']['ORGANIZATIONS'] = {}
FIELDS['LOGGED']['ORGANIZATIONS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['ORGANIZATIONS']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['ORGANIZATIONS']['DEGREE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['ORGANIZATIONS']['DATE'] = {'selector':'.organizations-date', 'type':'text'}
SECTIONS['LOGGED']['PATENTS'] = {'selector':'#background-patents > div', 'quantity':'multiple'}
FIELDS['LOGGED']['PATENTS'] = {}
FIELDS['LOGGED']['PATENTS']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['PATENTS']['PLACE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['PATENTS']['DATE'] = {'selector':'.patents-date', 'type':'text'}
SECTIONS['LOGGED']['SCORES'] = {'selector':'#background-test-scores > div', 'quantity':'multiple'}
FIELDS['LOGGED']['SCORES'] = {}
FIELDS['LOGGED']['SCORES']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['SCORES']['VALUE'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['SCORES']['DATE'] = {'selector':'.test-scores-date', 'type':'text'}
SECTIONS['LOGGED']['RECOMENDATIONS_RECEIVED'] = {'selector':'.endorsements-received li', 'quantity':'multiple'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED'] = {}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['POSITION'] = {'selector':'h3', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['PLACE'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['PERSON'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['PROFILE'] = {'selector':'h5 a', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['JOB'] = {'selector':'h6', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['DATE'] = {'selector':'.endorsement-date', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['IMAGE'] = {'selector':'.endorsement-picture img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['RECOMENDATIONS_RECEIVED']['URL'] = {'selector':'.endorsement-picture a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['RECOMENDATIONS_GIVEN'] = {'selector':'.endorsements-given li', 'quantity':'multiple'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN'] = {}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['NAME'] = {'selector':'', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['POSITION'] = {'selector':'h3', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['PLACE'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['PERSON'] = {'selector':'h5', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['PROFILE'] = {'selector':'h5 a', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['JOB'] = {'selector':'h6', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['DESCRIPTION'] = {'selector':'.description', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['DATE'] = {'selector':'.endorsement-date', 'type':'text'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['IMAGE'] = {'selector':'.endorsement-picture img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['RECOMENDATIONS_GIVEN']['URL'] = {'selector':'.endorsement-picture a', 'type':'attr', 'attr':'href'}
SECTIONS['LOGGED']['GROUPS'] = {'selector':'.groups-container li', 'quantity':'multiple'}
FIELDS['LOGGED']['GROUPS'] = {}
FIELDS['LOGGED']['GROUPS']['NAME'] = {'selector':'.group-link', 'type':'text'}
FIELDS['LOGGED']['GROUPS']['URL'] = {'selector':'.group-link', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['GROUPS']['MEMBERS'] = {'selector':'.groups-stats', 'type':'text'}
FIELDS['LOGGED']['GROUPS']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['RELATED'] = {'selector':'.discovery-results li', 'quantity':'multiple'}
FIELDS['LOGGED']['RELATED'] = {}
FIELDS['LOGGED']['RELATED']['NAME'] = {'selector':'img', 'type':'attr', 'attr':'alt'}
FIELDS['LOGGED']['RELATED']['URL'] = {'selector':'a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['RELATED']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['RELATED']['LEVEL'] = {'selector':'.degree-icon', 'type':'text'}
SECTIONS['LOGGED']['SIMILAR'] = {'selector':'.browse-map-list li', 'quantity':'multiple'}
FIELDS['LOGGED']['SIMILAR'] = {}
FIELDS['LOGGED']['SIMILAR']['NAME'] = {'selector':'h4', 'type':'text'}
FIELDS['LOGGED']['SIMILAR']['URL'] = {'selector':'h4 a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['SIMILAR']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
FIELDS['LOGGED']['SIMILAR']['TITLE'] = {'selector':'.browse-map-title', 'type':'text'}
SECTIONS['LOGGED']['FOLLOWING'] = {'selector':'.following-container > li', 'quantity':'multiple'}
FIELDS['LOGGED']['FOLLOWING'] = {}
    FIELDS['LOGGED']['FOLLOWING']['NAME'] = {'selector':'.channel-name a,.following-name a', 'type':'text'}
    FIELDS['LOGGED']['FOLLOWING']['URL'] = {'selector':'.channel-name a,.following-name a', 'type':'attr', 'attr':'href'}
FIELDS['LOGGED']['FOLLOWING']['DETAILS'] = {'selector':'.following-stats,.following-name', 'type':'text'}
FIELDS['LOGGED']['FOLLOWING']['IMG'] = {'selector':'img', 'type':'attr', 'attr':'src'}
SECTIONS['LOGGED']['PERSONAL'] = {'selector':'#personal-info-view', 'quantity':'multiple'}
FIELDS['LOGGED']['PERSONAL'] = {}
FIELDS['LOGGED']['PERSONAL']['EXTRA'] = {'selector':'', 'type':'text'}
# CODE
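    # login(): reads the LinkedIn credentials from the [credentials] section of an INI file
    #   and submits the sign-in form.
    # performClicks(): expands the collapsed "see more" style widgets so hidden sections are
    #   present in the DOM before extraction.
    # get_conn_id(): pulls the connId query-string value out of the raw page source.
    # extractProfile(): loads a profile URL, waits for the mode's container selector,
    #   expands the page and returns the extracted data as a dict.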
def login(self, filename):
config = ConfigParser.ConfigParser()
config.read(filename)
self.LOGIN_USER_VALUE = config.get('credentials', 'login_user_value')
self.LOGIN_PASS_VALUE = config.get('credentials', 'login_pass_value')
self.loadPage(self.INITIAL_URL)
self.waitAndWrite(self.LOGIN_USER_PATH, self.LOGIN_USER_VALUE)
self.submitForm(self.selectAndWrite(self.LOGIN_PASS_PATH, self.LOGIN_PASS_VALUE))
def performClicks(self):
self.clickSelector('#contact-info-tab')
self.clickMultiple('.hidden-view-more')
self.clickMultiple('.toggle-show-more')
self.clickMultiple('.see-action')
self.clickMultiple('.see-more-less')
if self.mode == 'LOGGED':
self.clickMultiple('.see-more')
else:
self.clickMultiple('li.see-more label')
self.clickMultiple('.recommendation label')
time.sleep(0.3)
def get_conn_id(self):
connId = ''
html = self.driver.page_source
arr1 = html.split('connId=')
if len(arr1) > 1:
arr2 = arr1[1].split('&')
connId = arr2[0]
return connId
def extractProfile(self, url):
self.loadAndWait(url, self.CONTAINER[self.mode])
self.performClicks()
self.data = self.extractSection(self.mode)
self.data['friendlyUrl'] = self.driver.current_url
self.data['connId'] = self.get_conn_id()
return self.data
def __init__(self, config=None, debug=False):
if debug:
self.driver = webdriver.Firefox()
else:
self.driver = webdriver.PhantomJS()
self.driver.set_page_load_timeout(self.TIMEOUT)
if config:
self.mode = 'LOGGED'
self.login(config) | en | 0.412451 | # CONFIG # PUBLIC # LOGGED # CODE | 2.528401 | 3 |
polls/tests/__init__.py | moonfruit/yysite | 0 | 6630372 | <gh_stars>0
# -*- coding: utf-8 -*-
from .modules_test import QuestionMethodTests
from .views_test import QuestionIndexDetailTests, QuestionViewTests
| # -*- coding: utf-8 -*-
from .modules_test import QuestionMethodTests
from .views_test import QuestionIndexDetailTests, QuestionViewTests | en | 0.769321 | # -*- coding: utf-8 -*- | 1.027748 | 1 |
tests/http/test_http_status.py | fatcode/chocs | 14 | 6630373 | <gh_stars>10-100
from chocs import HttpStatus
def test_http_status_str() -> None:
status = HttpStatus.OK
assert "200 OK" == str(status)
assert 200 == int(status)
def test_http_status_from_int() -> None:
status = HttpStatus.from_int(200)
assert "200 OK" == str(status)
assert 200 == int(status)
def test_http_status_operators() -> None:
assert HttpStatus.OK > 100
assert HttpStatus.OK > HttpStatus.CONTINUE
assert HttpStatus.OK >= 200
assert HttpStatus.OK >= HttpStatus.OK
assert HttpStatus.OK == HttpStatus.OK
assert HttpStatus.OK == 200
assert HttpStatus.CONTINUE < 200
assert HttpStatus.CONTINUE <= 100
assert HttpStatus.CONTINUE <= HttpStatus.CONTINUE
assert HttpStatus.CONTINUE == HttpStatus.CONTINUE
assert HttpStatus.CONTINUE == 100
| from chocs import HttpStatus
def test_http_status_str() -> None:
status = HttpStatus.OK
assert "200 OK" == str(status)
assert 200 == int(status)
def test_http_status_from_int() -> None:
status = HttpStatus.from_int(200)
assert "200 OK" == str(status)
assert 200 == int(status)
def test_http_status_operators() -> None:
assert HttpStatus.OK > 100
assert HttpStatus.OK > HttpStatus.CONTINUE
assert HttpStatus.OK >= 200
assert HttpStatus.OK >= HttpStatus.OK
assert HttpStatus.OK == HttpStatus.OK
assert HttpStatus.OK == 200
assert HttpStatus.CONTINUE < 200
assert HttpStatus.CONTINUE <= 100
assert HttpStatus.CONTINUE <= HttpStatus.CONTINUE
assert HttpStatus.CONTINUE == HttpStatus.CONTINUE
assert HttpStatus.CONTINUE == 100 | none | 1 | 2.69313 | 3 |
|
present.py | davidjohnoliver/IncomeForecast | 0 | 6630374 | import sim
import model
import statistics
from typing import List
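# The presenters below adapt simulation output into plain per-year (or per-month) series
# that a plotting / reporting layer can consume without reaching into sim or model objects.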
class Simulation_Presenter:
def __init__(self, simulation: sim.Simulation):
self._simulation = simulation
@property
def year_of_retirement(self):
return self._simulation.year_of_retirement
@property
def years_series(self):
"""Years series"""
return [f.year for f in self._simulation.all_funds]
@property
def spending_series(self):
"""Spending series"""
return [d.spending for d in self._simulation.all_deltas]
@property
def salary_series(self):
"""Salary series"""
return [d.gross_salary for d in self._simulation.all_deltas]
@property
def rrsp_total_series(self):
"""Accumulated RRSP series"""
return [f.rrsp_savings for f in self._simulation.all_funds]
@property
def tfsa_total_series(self):
"""Accumulated TFSA series"""
return [f.tfsa_savings for f in self._simulation.all_funds]
@property
def savings_total_series(self):
"""Accumulated total savings series"""
return [f.total_savings for f in self._simulation.all_funds]
# Career
@property
def career_years_series(self):
"""Years series pre-retirement"""
return [
f.year
for f in self._simulation.all_funds
if f.year <= self._simulation.year_of_retirement
]
@property
def career_salary_series(self):
"""Salary series pre-retirement"""
return [
d.gross_salary
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
def career_net_income_series(self):
"""Net income series pre-retirement"""
return [
d.total_net_income
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
def career_rrsp_contribution_series(self):
"""RRSP contributions pre-retirement"""
return [
d.rrsp
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
    def career_tfsa_contribution_series(self):
        """TFSA contributions pre-retirement"""
return [
d.tfsa
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
def career_total_savings_series(self):
"""Total savings, yearly, pre-retirement"""
return [
d.rrsp + d.tfsa
for d in self._simulation.all_deltas
if d.year < self._simulation.year_of_retirement
]
@property
def career_total_savings_monthly_series(self):
"""Total savings, monthly, pre-retirement"""
return [
(d.rrsp + d.tfsa) / 12.0
for d in self._simulation.all_deltas
if d.year < self._simulation.year_of_retirement
]
# Retirement
@property
def retirement_years_series(self):
"""Years series post-retirement"""
return [
f.year
for f in self._simulation.all_funds
if f.year > self._simulation.year_of_retirement
]
@property
def retirement_rrsp_withdrawal_series(self):
return [
-d.rrsp
for d in self._simulation.all_deltas
if d.year > self._simulation.year_of_retirement
]
@property
def retirement_tfsa_withdrawal_series(self):
return [
-d.tfsa
for d in self._simulation.all_deltas
if d.year > self._simulation.year_of_retirement
]
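# Wraps a single partner's per-year deltas; the career_* properties restrict each series to
# years with a positive gross salary.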
class Individual_Presenter:
def __init__(
self,
partner_params: sim.Individual_Parameters,
all_deltas: List[model.deltas_state],
) -> None:
self._partner_params = partner_params
self._all_deltas = all_deltas
@property
def salary_series(self):
"""Salary series"""
return [d.gross_salary for d in self._all_deltas]
@property
def tfsa_series(self):
return [d.tfsa for d in self._all_deltas]
@property
def tfsa_monthly_series(self):
return [t / 12 for t in self.tfsa_series]
@property
def rrsp_series(self):
return [d.rrsp for d in self._all_deltas]
@property
def rrsp_monthly_series(self):
return [t / 12 for t in self.rrsp_series]
@property
def career_salary_series(self):
return [d.gross_salary for d in self._all_deltas if d.gross_salary > 0]
@property
def career_year_series(self):
return [d.year for d in self._all_deltas if d.gross_salary > 0]
@property
def career_tfsa_series(self):
return [d.tfsa for d in self._all_deltas if d.gross_salary > 0]
@property
def career_tfsa_monthly_series(self):
return [t/12 for t in self.career_tfsa_series]
@property
def career_rrsp_series(self):
return [d.rrsp for d in self._all_deltas if d.gross_salary > 0]
@property
def career_rrsp_monthly_series(self):
return [t/12 for t in self.career_rrsp_series]
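# Combines both partners' deltas into household-level series (spending, combined savings)
# and exposes an Individual_Presenter for each partner.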
class Dual_Income_Simulation_Presenter:
def __init__(self, simulation: sim.Dual_Income_Simulation):
self._simulation = simulation
self._partner1 = Individual_Presenter(
self._partner1_deltas, [cd.partner1_deltas for cd in simulation.all_deltas]
)
self._partner2 = Individual_Presenter(
self._partner2_deltas, [cd.partner2_deltas for cd in simulation.all_deltas]
)
@property
def partner1(self):
return self._partner1
@property
def partner2(self):
return self._partner2
@property
def year_of_retirement(self):
return self._simulation.year_of_retirement
@property
def years_series(self):
"""Years series"""
return [f.year for f in self._simulation.all_funds]
@property
    def career_years_series(self):
        """Years series for years in which at least one partner is working"""
return [
d.year for d in self._simulation.all_deltas if self._is_someone_working(d)
]
@property
def spending_series(self):
"""Spending series"""
return [d.household_spending for d in self._simulation.all_deltas]
@property
def spending_monthly_series(self):
return [s / 12 for s in self.spending_series]
@property
    def combined_savings_series(self):
        """Combined household savings (both partners' RRSP + TFSA deltas) per year"""
return [self._combined_savings(d) for d in self._simulation.all_deltas]
@property
    def combined_savings_monthly_series(self):
        """Combined household savings per month"""
return [s / 12 for s in self.combined_savings_series]
@property
    def career_combined_savings_series(self):
        """Combined household savings per year, limited to years in which at least one partner is working"""
return [
self._combined_savings(d)
for d in self._simulation.all_deltas
if self._is_someone_working(d)
]
@property
    def career_combined_savings_monthly_series(self):
        """Combined household savings per month, limited to years in which at least one partner is working"""
return [s / 12 for s in self.career_combined_savings_series]
@property
def retirement_spending(self):
return self.spending_series[-1]
@property
def first_year_spending(self):
return self.spending_series[1]
@property
def average_yearly_spending(self):
return statistics.mean(self.spending_series)
@property
def _partner1_deltas(self):
return [d.partner1_deltas for d in self._simulation.all_deltas]
@property
def _partner2_deltas(self):
return [d.partner2_deltas for d in self._simulation.all_deltas]
def _is_someone_working(self, deltas_state: model.couple_deltas_state):
return (
deltas_state.partner1_deltas.gross_salary > 0
or deltas_state.partner2_deltas.gross_salary > 0
)
def _combined_savings(self, deltas_state: model.couple_deltas_state):
return (
deltas_state.partner1_deltas.tfsa
+ deltas_state.partner1_deltas.rrsp
+ deltas_state.partner2_deltas.tfsa
+ deltas_state.partner2_deltas.rrsp
)
| import sim
import model
import statistics
from typing import List
class Simulation_Presenter:
def __init__(self, simulation: sim.Simulation):
self._simulation = simulation
@property
def year_of_retirement(self):
return self._simulation.year_of_retirement
@property
def years_series(self):
"""Years series"""
return [f.year for f in self._simulation.all_funds]
@property
def spending_series(self):
"""Spending series"""
return [d.spending for d in self._simulation.all_deltas]
@property
def salary_series(self):
"""Salary series"""
return [d.gross_salary for d in self._simulation.all_deltas]
@property
def rrsp_total_series(self):
"""Accumulated RRSP series"""
return [f.rrsp_savings for f in self._simulation.all_funds]
@property
def tfsa_total_series(self):
"""Accumulated TFSA series"""
return [f.tfsa_savings for f in self._simulation.all_funds]
@property
def savings_total_series(self):
"""Accumulated total savings series"""
return [f.total_savings for f in self._simulation.all_funds]
# Career
@property
def career_years_series(self):
"""Years series pre-retirement"""
return [
f.year
for f in self._simulation.all_funds
if f.year <= self._simulation.year_of_retirement
]
@property
def career_salary_series(self):
"""Salary series pre-retirement"""
return [
d.gross_salary
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
def career_net_income_series(self):
"""Net income series pre-retirement"""
return [
d.total_net_income
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
def career_rrsp_contribution_series(self):
"""RRSP contributions pre-retirement"""
return [
d.rrsp
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
    def career_tfsa_contribution_series(self):
        """TFSA contributions pre-retirement"""
return [
d.tfsa
for d in self._simulation.all_deltas
if d.year <= self._simulation.year_of_retirement
]
@property
def career_total_savings_series(self):
"""Total savings, yearly, pre-retirement"""
return [
d.rrsp + d.tfsa
for d in self._simulation.all_deltas
if d.year < self._simulation.year_of_retirement
]
@property
def career_total_savings_monthly_series(self):
"""Total savings, monthly, pre-retirement"""
return [
(d.rrsp + d.tfsa) / 12.0
for d in self._simulation.all_deltas
if d.year < self._simulation.year_of_retirement
]
# Retirement
@property
def retirement_years_series(self):
"""Years series post-retirement"""
return [
f.year
for f in self._simulation.all_funds
if f.year > self._simulation.year_of_retirement
]
@property
def retirement_rrsp_withdrawal_series(self):
return [
-d.rrsp
for d in self._simulation.all_deltas
if d.year > self._simulation.year_of_retirement
]
@property
def retirement_tfsa_withdrawal_series(self):
return [
-d.tfsa
for d in self._simulation.all_deltas
if d.year > self._simulation.year_of_retirement
]
class Individual_Presenter:
def __init__(
self,
partner_params: sim.Individual_Parameters,
all_deltas: List[model.deltas_state],
) -> None:
self._partner_params = partner_params
self._all_deltas = all_deltas
@property
def salary_series(self):
"""Salary series"""
return [d.gross_salary for d in self._all_deltas]
@property
def tfsa_series(self):
return [d.tfsa for d in self._all_deltas]
@property
def tfsa_monthly_series(self):
return [t / 12 for t in self.tfsa_series]
@property
def rrsp_series(self):
return [d.rrsp for d in self._all_deltas]
@property
def rrsp_monthly_series(self):
return [t / 12 for t in self.rrsp_series]
@property
def career_salary_series(self):
return [d.gross_salary for d in self._all_deltas if d.gross_salary > 0]
@property
def career_year_series(self):
return [d.year for d in self._all_deltas if d.gross_salary > 0]
@property
def career_tfsa_series(self):
return [d.tfsa for d in self._all_deltas if d.gross_salary > 0]
@property
def career_tfsa_monthly_series(self):
return [t/12 for t in self.career_tfsa_series]
@property
def career_rrsp_series(self):
return [d.rrsp for d in self._all_deltas if d.gross_salary > 0]
@property
def career_rrsp_monthly_series(self):
return [t/12 for t in self.career_rrsp_series]
class Dual_Income_Simulation_Presenter:
def __init__(self, simulation: sim.Dual_Income_Simulation):
self._simulation = simulation
self._partner1 = Individual_Presenter(
self._partner1_deltas, [cd.partner1_deltas for cd in simulation.all_deltas]
)
self._partner2 = Individual_Presenter(
self._partner2_deltas, [cd.partner2_deltas for cd in simulation.all_deltas]
)
@property
def partner1(self):
return self._partner1
@property
def partner2(self):
return self._partner2
@property
def year_of_retirement(self):
return self._simulation.year_of_retirement
@property
def years_series(self):
"""Years series"""
return [f.year for f in self._simulation.all_funds]
@property
    def career_years_series(self):
        """Years series for years in which at least one partner is working"""
return [
d.year for d in self._simulation.all_deltas if self._is_someone_working(d)
]
@property
def spending_series(self):
"""Spending series"""
return [d.household_spending for d in self._simulation.all_deltas]
@property
def spending_monthly_series(self):
return [s / 12 for s in self.spending_series]
@property
    def combined_savings_series(self):
        """Combined household savings (both partners' RRSP + TFSA deltas) per year"""
return [self._combined_savings(d) for d in self._simulation.all_deltas]
@property
    def combined_savings_monthly_series(self):
        """Combined household savings per month"""
return [s / 12 for s in self.combined_savings_series]
@property
    def career_combined_savings_series(self):
        """Combined household savings per year, limited to years in which at least one partner is working"""
return [
self._combined_savings(d)
for d in self._simulation.all_deltas
if self._is_someone_working(d)
]
@property
    def career_combined_savings_monthly_series(self):
        """Combined household savings per month, limited to years in which at least one partner is working"""
return [s / 12 for s in self.career_combined_savings_series]
@property
def retirement_spending(self):
return self.spending_series[-1]
@property
def first_year_spending(self):
return self.spending_series[1]
@property
def average_yearly_spending(self):
return statistics.mean(self.spending_series)
@property
def _partner1_deltas(self):
return [d.partner1_deltas for d in self._simulation.all_deltas]
@property
def _partner2_deltas(self):
return [d.partner2_deltas for d in self._simulation.all_deltas]
def _is_someone_working(self, deltas_state: model.couple_deltas_state):
return (
deltas_state.partner1_deltas.gross_salary > 0
or deltas_state.partner2_deltas.gross_salary > 0
)
def _combined_savings(self, deltas_state: model.couple_deltas_state):
return (
deltas_state.partner1_deltas.tfsa
+ deltas_state.partner1_deltas.rrsp
+ deltas_state.partner2_deltas.tfsa
+ deltas_state.partner2_deltas.rrsp
)
| en | 0.9107 | Years series Spending series Salary series Accumulated RRSP series Accumulated TFSA series Accumulated total savings series # Career Years series pre-retirement Salary series pre-retirement Net income series pre-retirement RRSP contributions pre-retirement RRSP contributions pre-retirement Total savings, yearly, pre-retirement Total savings, monthly, pre-retirement # Retirement Years series post-retirement Salary series Years series Years series Spending series p p p p | 3.107785 | 3 |
tests/test_xlsx_to_json.py | open-contracting/ocdskit-web | 3 | 6630375 | <gh_stars>1-10
from tests import ViewTestCase, ViewTests
class XlsxToJsonTestCase(ViewTestCase, ViewTests):
url = '/to-json/'
files = [
'1.1/spreadsheets/flattened.xlsx',
]
def test_go_with_files(self):
self.assertResults({'type': '.csv .xlsx .zip'}, {}, {
'result.json': 'results/unflattened_xlsx.json',
}, mode='rb')
| from tests import ViewTestCase, ViewTests
class XlsxToJsonTestCase(ViewTestCase, ViewTests):
url = '/to-json/'
files = [
'1.1/spreadsheets/flattened.xlsx',
]
def test_go_with_files(self):
self.assertResults({'type': '.csv .xlsx .zip'}, {}, {
'result.json': 'results/unflattened_xlsx.json',
}, mode='rb') | none | 1 | 2.245673 | 2 |
|
official/cv/yolov5/train.py | leelige/mindspore | 77 | 6630376 | <reponame>leelige/mindspore<filename>official/cv/yolov5/train.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""YoloV5 train."""
import os
import time
import mindspore as ms
import mindspore.nn as nn
import mindspore.communication as comm
from src.yolo import YOLOV5, YoloWithLossCell
from src.logger import get_logger
from src.util import AverageMeter, get_param_groups, cpu_affinity
from src.lr_scheduler import get_lr
from src.yolo_dataset import create_yolo_dataset
from src.initializer import default_recurisive_init, load_yolov5_params
from model_utils.config import config
from model_utils.device_adapter import get_device_id
# only useful for huawei cloud modelarts.
from model_utils.moxing_adapter import moxing_wrapper, modelarts_pre_process, modelarts_post_process
ms.set_seed(1)
def init_distribute():
comm.init()
config.rank = comm.get_rank()
config.group_size = comm.get_group_size()
ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=config.group_size)
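# One-time per-process setup (train_preprocess below): resolve dataset paths, bind the
# device, optionally initialise distributed training and CPU affinity, and attach the
# shared logger to `config`.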
def train_preprocess():
if config.lr_scheduler == 'cosine_annealing' and config.max_epoch > config.T_max:
config.T_max = config.max_epoch
config.lr_epochs = list(map(int, config.lr_epochs.split(',')))
config.data_root = os.path.join(config.data_dir, 'train2017')
config.annFile = os.path.join(config.data_dir, 'annotations/instances_train2017.json')
device_id = get_device_id()
ms.set_context(mode=ms.GRAPH_MODE, device_target=config.device_target, device_id=device_id)
if config.is_distributed:
# init distributed
init_distribute()
# for promoting performance in GPU device
if config.device_target == "GPU" and config.bind_cpu:
cpu_affinity(config.rank, min(config.group_size, config.device_num))
    # The logger is attached to config so it can be used from other functions, e.g. config.logger.info("xxx")
config.logger = get_logger(config.output_dir, config.rank)
config.logger.save_args(config)
@moxing_wrapper(pre_process=modelarts_pre_process, post_process=modelarts_post_process, pre_args=[config])
def run_train():
train_preprocess()
loss_meter = AverageMeter('loss')
dict_version = {'yolov5s': 0, 'yolov5m': 1, 'yolov5l': 2, 'yolov5x': 3}
network = YOLOV5(is_training=True, version=dict_version[config.yolov5_version])
# default is kaiming-normal
default_recurisive_init(network)
load_yolov5_params(config, network)
network = YoloWithLossCell(network)
ds = create_yolo_dataset(image_dir=config.data_root, anno_path=config.annFile, is_training=True,
batch_size=config.per_batch_size, device_num=config.group_size,
rank=config.rank, config=config)
config.logger.info('Finish loading dataset')
steps_per_epoch = ds.get_dataset_size()
lr = get_lr(config, steps_per_epoch)
opt = nn.Momentum(params=get_param_groups(network), momentum=config.momentum, learning_rate=ms.Tensor(lr),
weight_decay=config.weight_decay, loss_scale=config.loss_scale)
network = nn.TrainOneStepCell(network, opt, config.loss_scale // 2)
network.set_train()
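    # Manual training loop: iterate the dataset directly instead of using a Model wrapper,
    # report averaged loss and throughput every log_interval steps, and save a checkpoint
    # once per epoch on rank 0.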
data_loader = ds.create_tuple_iterator(do_copy=False)
first_step = True
t_end = time.time()
for epoch_idx in range(config.max_epoch):
for step_idx, data in enumerate(data_loader):
images = data[0]
input_shape = images.shape[2:4]
input_shape = ms.Tensor(tuple(input_shape[::-1]), ms.float32)
loss = network(images, data[2], data[3], data[4], data[5], data[6],
data[7], input_shape)
loss_meter.update(loss.asnumpy())
            # Log loss and throughput (fps / per-step time) once every config.log_interval steps.
if (epoch_idx * steps_per_epoch + step_idx) % config.log_interval == 0:
time_used = time.time() - t_end
if first_step:
fps = config.per_batch_size * config.group_size / time_used
per_step_time = time_used * 1000
first_step = False
else:
fps = config.per_batch_size * config.log_interval * config.group_size / time_used
per_step_time = time_used / config.log_interval * 1000
config.logger.info('epoch[{}], iter[{}], {}, fps:{:.2f} imgs/sec, '
'lr:{}, per step time: {}ms'.format(epoch_idx + 1, step_idx + 1,
loss_meter, fps, lr[step_idx], per_step_time))
t_end = time.time()
loss_meter.reset()
if config.rank == 0:
ckpt_name = os.path.join(config.output_dir, "yolov5_{}_{}.ckpt".format(epoch_idx + 1, steps_per_epoch))
ms.save_checkpoint(network, ckpt_name)
config.logger.info('==========end training===============')
if __name__ == "__main__":
run_train()
| # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""YoloV5 train."""
import os
import time
import mindspore as ms
import mindspore.nn as nn
import mindspore.communication as comm
from src.yolo import YOLOV5, YoloWithLossCell
from src.logger import get_logger
from src.util import AverageMeter, get_param_groups, cpu_affinity
from src.lr_scheduler import get_lr
from src.yolo_dataset import create_yolo_dataset
from src.initializer import default_recurisive_init, load_yolov5_params
from model_utils.config import config
from model_utils.device_adapter import get_device_id
# only useful for huawei cloud modelarts.
from model_utils.moxing_adapter import moxing_wrapper, modelarts_pre_process, modelarts_post_process
ms.set_seed(1)
def init_distribute():
comm.init()
config.rank = comm.get_rank()
config.group_size = comm.get_group_size()
ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=config.group_size)
def train_preprocess():
if config.lr_scheduler == 'cosine_annealing' and config.max_epoch > config.T_max:
config.T_max = config.max_epoch
config.lr_epochs = list(map(int, config.lr_epochs.split(',')))
config.data_root = os.path.join(config.data_dir, 'train2017')
config.annFile = os.path.join(config.data_dir, 'annotations/instances_train2017.json')
device_id = get_device_id()
ms.set_context(mode=ms.GRAPH_MODE, device_target=config.device_target, device_id=device_id)
if config.is_distributed:
# init distributed
init_distribute()
# for promoting performance in GPU device
if config.device_target == "GPU" and config.bind_cpu:
cpu_affinity(config.rank, min(config.group_size, config.device_num))
    # the logger is attached to config so other functions can use it, e.g. config.logger.info("xxx")
config.logger = get_logger(config.output_dir, config.rank)
config.logger.save_args(config)
@moxing_wrapper(pre_process=modelarts_pre_process, post_process=modelarts_post_process, pre_args=[config])
def run_train():
train_preprocess()
loss_meter = AverageMeter('loss')
dict_version = {'yolov5s': 0, 'yolov5m': 1, 'yolov5l': 2, 'yolov5x': 3}
network = YOLOV5(is_training=True, version=dict_version[config.yolov5_version])
# default is kaiming-normal
default_recurisive_init(network)
load_yolov5_params(config, network)
network = YoloWithLossCell(network)
ds = create_yolo_dataset(image_dir=config.data_root, anno_path=config.annFile, is_training=True,
batch_size=config.per_batch_size, device_num=config.group_size,
rank=config.rank, config=config)
config.logger.info('Finish loading dataset')
steps_per_epoch = ds.get_dataset_size()
lr = get_lr(config, steps_per_epoch)
opt = nn.Momentum(params=get_param_groups(network), momentum=config.momentum, learning_rate=ms.Tensor(lr),
weight_decay=config.weight_decay, loss_scale=config.loss_scale)
network = nn.TrainOneStepCell(network, opt, config.loss_scale // 2)
network.set_train()
data_loader = ds.create_tuple_iterator(do_copy=False)
first_step = True
t_end = time.time()
for epoch_idx in range(config.max_epoch):
for step_idx, data in enumerate(data_loader):
images = data[0]
input_shape = images.shape[2:4]
input_shape = ms.Tensor(tuple(input_shape[::-1]), ms.float32)
loss = network(images, data[2], data[3], data[4], data[5], data[6],
data[7], input_shape)
loss_meter.update(loss.asnumpy())
            # log the running loss and throughput once every config.log_interval steps.
if (epoch_idx * steps_per_epoch + step_idx) % config.log_interval == 0:
time_used = time.time() - t_end
if first_step:
fps = config.per_batch_size * config.group_size / time_used
per_step_time = time_used * 1000
first_step = False
else:
fps = config.per_batch_size * config.log_interval * config.group_size / time_used
per_step_time = time_used / config.log_interval * 1000
config.logger.info('epoch[{}], iter[{}], {}, fps:{:.2f} imgs/sec, '
'lr:{}, per step time: {}ms'.format(epoch_idx + 1, step_idx + 1,
loss_meter, fps, lr[step_idx], per_step_time))
t_end = time.time()
loss_meter.reset()
if config.rank == 0:
ckpt_name = os.path.join(config.output_dir, "yolov5_{}_{}.ckpt".format(epoch_idx + 1, steps_per_epoch))
ms.save_checkpoint(network, ckpt_name)
config.logger.info('==========end training===============')
if __name__ == "__main__":
run_train() | en | 0.792111 | # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ YoloV5 train. # only useful for huawei cloud modelarts. # init distributed # for promoting performance in GPU device # logger module is managed by config, it is used in other function. e.x. config.logger.info("xxx") # default is kaiming-normal # it is used for loss, performance output per config.log_interval steps. | 1.726048 | 2 |
async_pubsub/redis_pubsub.py | abhinavsingh/async_pubsub | 17 | 6630377 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import io
import socket
import redis
import hiredis
from tornado.iostream import IOStream
from .base import PubSubBase
class RedisPubSub(PubSubBase):
def __init__(self, host='127.0.0.1', port=6379, *args, **kwargs):
self.host = host
self.port = port
super(RedisPubSub, self).__init__(*args, **kwargs)
@staticmethod
def get_redis():
return redis.StrictRedis(
host = '127.0.0.1',
port = 6379,
db = 0
)
##
## pubsub api
##
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.stream = IOStream(self.socket)
self.stream.connect((self.host, self.port), self.on_connect)
def disconnect(self):
self.unsubscribe()
self.stream.close()
def subscribe(self, channel_id):
self.send('SUBSCRIBE', channel_id)
def unsubscribe(self, channel_id=None):
if channel_id:
self.send('UNSUBSCRIBE', channel_id)
else:
self.send('UNSUBSCRIBE')
@staticmethod
def publish(channel_id, message):
r = RedisPubSub.get_redis()
r.publish(channel_id, message)
##
## socket/stream callbacks
##
def on_connect(self):
self.stream.set_close_callback(self.on_close)
self.stream.read_until_close(self.on_data, self.on_streaming_data)
self.reader = hiredis.Reader()
self.connected()
def on_data(self, *args, **kwargs):
pass
def on_streaming_data(self, data):
self.reader.feed(data)
reply = self.reader.gets()
while reply:
if reply[0] == 'subscribe':
self.subscribed(reply[1])
elif reply[0] == 'unsubscribe':
self.unsubscribed(reply[1])
elif reply[0] == 'message':
self.on_message(reply[1], reply[2])
else:
raise Exception('Unhandled data from redis %s' % reply)
reply = self.reader.gets()
def on_close(self):
self.socket = None
self.stream = None
self.disconnected()
##
## redis protocol parser (derived from redis-py)
##
def encode(self, value):
if isinstance(value, bytes):
return value
if isinstance(value, float):
value = repr(value)
if not isinstance(value, basestring):
value = str(value)
if isinstance(value, unicode):
value = value.encode('utf-8', 'strict')
return value
def pack_command(self, *args):
cmd = io.BytesIO()
cmd.write('*')
cmd.write(str(len(args)))
cmd.write('\r\n')
for arg in args:
arg = self.encode(arg)
cmd.write('$')
cmd.write(str(len(arg)))
cmd.write('\r\n')
cmd.write(arg)
cmd.write('\r\n')
return cmd.getvalue()
def send(self, *args):
"""Send redis command."""
cmd = self.pack_command(*args)
self.stream.write(cmd)
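# --- Usage sketch (illustrative only, not part of the original module) ---
# Assumes a redis server on localhost and a running tornado IOLoop to drive the
# IOStream callbacks; the channel name below is made up.
#
#   client = RedisPubSub(host='127.0.0.1', port=6379)
#   client.connect()                               # async connect, then connected() fires
#   client.subscribe('demo-channel')               # SUBSCRIBE -> subscribed() callback
#   RedisPubSub.publish('demo-channel', 'hello')   # plain StrictRedis PUBLISH
#   # incoming payloads are delivered through on_message(channel, message)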
| # -*- coding: utf-8 -*-
import io
import socket
import redis
import hiredis
from tornado.iostream import IOStream
from .base import PubSubBase
class RedisPubSub(PubSubBase):
def __init__(self, host='127.0.0.1', port=6379, *args, **kwargs):
self.host = host
self.port = port
super(RedisPubSub, self).__init__(*args, **kwargs)
@staticmethod
def get_redis():
return redis.StrictRedis(
host = '127.0.0.1',
port = 6379,
db = 0
)
##
## pubsub api
##
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.stream = IOStream(self.socket)
self.stream.connect((self.host, self.port), self.on_connect)
def disconnect(self):
self.unsubscribe()
self.stream.close()
def subscribe(self, channel_id):
self.send('SUBSCRIBE', channel_id)
def unsubscribe(self, channel_id=None):
if channel_id:
self.send('UNSUBSCRIBE', channel_id)
else:
self.send('UNSUBSCRIBE')
@staticmethod
def publish(channel_id, message):
r = RedisPubSub.get_redis()
r.publish(channel_id, message)
##
## socket/stream callbacks
##
def on_connect(self):
self.stream.set_close_callback(self.on_close)
self.stream.read_until_close(self.on_data, self.on_streaming_data)
self.reader = hiredis.Reader()
self.connected()
def on_data(self, *args, **kwargs):
pass
def on_streaming_data(self, data):
self.reader.feed(data)
reply = self.reader.gets()
while reply:
if reply[0] == 'subscribe':
self.subscribed(reply[1])
elif reply[0] == 'unsubscribe':
self.unsubscribed(reply[1])
elif reply[0] == 'message':
self.on_message(reply[1], reply[2])
else:
raise Exception('Unhandled data from redis %s' % reply)
reply = self.reader.gets()
def on_close(self):
self.socket = None
self.stream = None
self.disconnected()
##
## redis protocol parser (derived from redis-py)
##
def encode(self, value):
if isinstance(value, bytes):
return value
if isinstance(value, float):
value = repr(value)
if not isinstance(value, basestring):
value = str(value)
if isinstance(value, unicode):
value = value.encode('utf-8', 'strict')
return value
def pack_command(self, *args):
cmd = io.BytesIO()
cmd.write('*')
cmd.write(str(len(args)))
cmd.write('\r\n')
for arg in args:
arg = self.encode(arg)
cmd.write('$')
cmd.write(str(len(arg)))
cmd.write('\r\n')
cmd.write(arg)
cmd.write('\r\n')
return cmd.getvalue()
def send(self, *args):
"""Send redis command."""
cmd = self.pack_command(*args)
self.stream.write(cmd) | en | 0.402193 | # -*- coding: utf-8 -*- ## ## pubsub api ## ## ## socket/stream callbacks ## ## ## redis protocol parser (derived from redis-py) ## Send redis command. | 2.360284 | 2 |
yampex/scaling.py | edsuom/yampex | 16 | 6630378 | <reponame>edsuom/yampex<filename>yampex/scaling.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# yampex:
# Yet Another Matplotlib Extension
#
# Copyright (C) 2017-2021 by <NAME>,
# http://edsuom.com/yampex
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Do everything with a Plotter in context.
"""
import numpy as np
class Scaler(object):
"""
Multiples go like this: 1000, 500, 200, 100, 50, 20, 10, 5, 1, 0.5, ...
"""
mantissas = 10, 5, 2, 1
initialExponent = 3
minExponent = -7
maxCrossoverFraction = 0.4
def __init__(self, X):
self.X = X
self.Xmax = X.max()
self.ssX = 0.9 * sum(np.square(X))
self.maxCrossoverN = self.maxCrossoverFraction * len(X)
def tryScale(self, Y, multiplier, ignoreCrossover=False):
Ym = multiplier*Y
if Ym.max() > self.Xmax or sum(np.square(Ym)) > self.ssX:
# Sum of squares under X has to be greater than that under Y
return False
if ignoreCrossover:
return True
Z = np.greater(Ym, self.X)
if not np.any(Z):
# No crossover, this will work
return True
# One crossover region is also OK, if Y is still below
# X most of the time
K = np.nonzero(Z)[0]
if len(K) > self.maxCrossoverN:
# Too much crossover time
return False
if len(K) < 20:
# It's tiny, this is fine no matter what
return True
# Skip a small fragment on the ends of the crossover
        # region to accommodate slight noise
K = K[8:-8]
# No skipped indices == one continuous region
if np.ediff1d(K).max() == 1:
return True
def __call__(self, Y):
"""
Returns an appropriate scaling factor for 1-D numpy array I{Y}
relative to my base array I{X}.
"""
for ignoreCrossover in (False, True):
k = -1
exponent = self.initialExponent
while True:
k += 1
if k == len(self.mantissas):
k = 0
exponent -= 1
if exponent < self.minExponent:
# No suitable multiplier found
break
multiplier = self.mantissas[k] * 10**exponent
if self.tryScale(Y, multiplier, ignoreCrossover):
return multiplier
return 1.0
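# --- Usage sketch (illustrative, not part of the original module); guarded so
# importing this module is unaffected. Scaler picks a multiplier from the
# 10/5/2/1 mantissa ladder so a second trace can share an axis with X.
if __name__ == "__main__":  # pragma: no cover
    _X = np.abs(np.random.randn(1000))           # reference curve
    _Y = 300.0 * np.abs(np.random.randn(1000))   # much larger second curve
    _m = Scaler(_X)(_Y)                          # e.g. 0.002 -- value depends on the data
    print("multiplier:", _m, "max(m*Y):", (_m * _Y).max(), "max(X):", _X.max())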
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# yampex:
# Yet Another Matplotlib Extension
#
# Copyright (C) 2017-2021 by <NAME>,
# http://edsuom.com/yampex
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Do everything with a Plotter in context.
"""
import numpy as np
class Scaler(object):
"""
Multiples go like this: 1000, 500, 200, 100, 50, 20, 10, 5, 1, 0.5, ...
"""
mantissas = 10, 5, 2, 1
initialExponent = 3
minExponent = -7
maxCrossoverFraction = 0.4
def __init__(self, X):
self.X = X
self.Xmax = X.max()
self.ssX = 0.9 * sum(np.square(X))
self.maxCrossoverN = self.maxCrossoverFraction * len(X)
def tryScale(self, Y, multiplier, ignoreCrossover=False):
Ym = multiplier*Y
if Ym.max() > self.Xmax or sum(np.square(Ym)) > self.ssX:
# Sum of squares under X has to be greater than that under Y
return False
if ignoreCrossover:
return True
Z = np.greater(Ym, self.X)
if not np.any(Z):
# No crossover, this will work
return True
# One crossover region is also OK, if Y is still below
# X most of the time
K = np.nonzero(Z)[0]
if len(K) > self.maxCrossoverN:
# Too much crossover time
return False
if len(K) < 20:
# It's tiny, this is fine no matter what
return True
# Skip a small fragment on the ends of the crossover
        # region to accommodate slight noise
K = K[8:-8]
# No skipped indices == one continuous region
if np.ediff1d(K).max() == 1:
return True
def __call__(self, Y):
"""
Returns an appropriate scaling factor for 1-D numpy array I{Y}
relative to my base array I{X}.
"""
for ignoreCrossover in (False, True):
k = -1
exponent = self.initialExponent
while True:
k += 1
if k == len(self.mantissas):
k = 0
exponent -= 1
if exponent < self.minExponent:
# No suitable multiplier found
break
multiplier = self.mantissas[k] * 10**exponent
if self.tryScale(Y, multiplier, ignoreCrossover):
return multiplier
return 1.0 | en | 0.831757 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # yampex: # Yet Another Matplotlib Extension # # Copyright (C) 2017-2021 by <NAME>, # http://edsuom.com/yampex # # See edsuom.com for API documentation as well as information about # Ed's background and other projects, software and otherwise. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS # IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language # governing permissions and limitations under the License. Do everything with a Plotter in context. Multiples go like this: 1000, 500, 200, 100, 50, 20, 10, 5, 1, 0.5, ... # Sum of squares under X has to be greater than that under Y # No crossover, this will work # One crossover region is also OK, if Y is still below # X most of the time # Too much crossover time # It's tiny, this is fine no matter what # Skip a small fragment on the ends of the crossover # region to accomodate slight noise # No skipped indices == one continuous region Returns an appropriate scaling factor for 1-D numpy array I{Y} relative to my base array I{X}. # No suitable multiplier found | 3.078247 | 3 |
invenio_records_resources/services/records/results.py | FlorianCassayre/invenio-records-resources | 0 | 6630379 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Service results."""
from flask import current_app
from marshmallow_utils.links import LinksFactory
from ...config import lt_es7
from ...pagination import Pagination
from ..base import ServiceItemResult, ServiceListResult
def _current_host():
"""Function used to provide the current hostname to the link store."""
if current_app:
return current_app.config['SITE_HOSTNAME']
return None
class RecordItem(ServiceItemResult):
"""Single record result."""
def __init__(self, service, identity, record, errors=None,
links_config=None):
"""Constructor."""
self._errors = errors
self._identity = identity
self._links_config = links_config
self._record = record
self._service = service
self._data = None
@property
def id(self):
"""Get the record id."""
return self._record.pid.pid_value
def __getitem__(self, key):
"""Key a key from the data."""
return self.data[key]
@property
def data(self):
"""Property to get the record."""
if self._data:
return self._data
links = LinksFactory(host=_current_host, config=self._links_config)
self._data = self._service.schema.dump(
self._identity,
self._record,
links_namespace="record",
links_factory=links,
)
return self._data
def to_dict(self):
"""Get a dictionary for the record."""
res = self.data
if self._errors:
res['errors'] = self._errors
return res
class RecordList(ServiceListResult):
"""List of records result."""
def __init__(self, service, identity, results, params,
links_config=None):
"""Constructor.
:params service: a service instance
:params identity: an identity that performed the service request
:params results: the search results
:params params: dictionary of the query parameters
:params links_config: a links store config
"""
self._identity = identity
self._links_config = links_config
self._results = results
self._service = service
self._params = params
def __len__(self):
"""Return the total numer of hits."""
return self.total
def __iter__(self):
"""Iterator over the hits."""
return self.hits
@property
def total(self):
"""Get total number of hits."""
if lt_es7:
return self._results.hits.total
else:
return self._results.hits.total["value"]
@property
def aggregations(self):
"""Get the search result aggregations."""
return self._results.aggregations.to_dict()
@property
def hits(self):
"""Iterator over the hits."""
links = LinksFactory(host=_current_host, config=self._links_config)
for hit in self._results:
# Load dump
record = self._service.record_cls.loads(hit.to_dict())
# Project the record
projection = self._service.schema.dump(
self._identity,
record,
pid=record.pid,
record=record,
links_namespace="record",
links_factory=links
)
yield projection
@property
def pagination(self):
"""Create a pagination object."""
return Pagination(
self._params['size'],
self._params['page'],
self.total,
)
@property
def links(self):
"""Get the search result links.
TODO: Would be nicer if this were a parallel of data above.
"""
links = LinksFactory(host=_current_host, config=self._links_config)
schema = self._service.schema_search_links
data = schema.dump(
self._identity,
# It ain't pretty but it will do
{**self._params, "_pagination": self.pagination},
links_factory=links,
links_namespace="search",
)
return data.get("links")
def to_dict(self):
"""Return result as a dictionary."""
res = {
"hits": {
"hits": list(self.hits),
"total": self.total,
},
"links": self.links,
"sortBy": self._params["sort"],
"aggregations": self.aggregations,
}
if res['links'] is None:
del res['links']
return res
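# --- Shape sketch (illustrative, not part of the original module) ---
# A RecordList returned by the service roughly serializes as:
#
#   result.to_dict() == {
#       "hits": {"hits": [<projected records>], "total": <int>},
#       "links": {...},           # the key is dropped when no links were resolved
#       "sortBy": <params["sort"]>,
#       "aggregations": {...},
#   }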
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Service results."""
from flask import current_app
from marshmallow_utils.links import LinksFactory
from ...config import lt_es7
from ...pagination import Pagination
from ..base import ServiceItemResult, ServiceListResult
def _current_host():
"""Function used to provide the current hostname to the link store."""
if current_app:
return current_app.config['SITE_HOSTNAME']
return None
class RecordItem(ServiceItemResult):
"""Single record result."""
def __init__(self, service, identity, record, errors=None,
links_config=None):
"""Constructor."""
self._errors = errors
self._identity = identity
self._links_config = links_config
self._record = record
self._service = service
self._data = None
@property
def id(self):
"""Get the record id."""
return self._record.pid.pid_value
def __getitem__(self, key):
"""Key a key from the data."""
return self.data[key]
@property
def data(self):
"""Property to get the record."""
if self._data:
return self._data
links = LinksFactory(host=_current_host, config=self._links_config)
self._data = self._service.schema.dump(
self._identity,
self._record,
links_namespace="record",
links_factory=links,
)
return self._data
def to_dict(self):
"""Get a dictionary for the record."""
res = self.data
if self._errors:
res['errors'] = self._errors
return res
class RecordList(ServiceListResult):
"""List of records result."""
def __init__(self, service, identity, results, params,
links_config=None):
"""Constructor.
:params service: a service instance
:params identity: an identity that performed the service request
:params results: the search results
:params params: dictionary of the query parameters
:params links_config: a links store config
"""
self._identity = identity
self._links_config = links_config
self._results = results
self._service = service
self._params = params
def __len__(self):
"""Return the total numer of hits."""
return self.total
def __iter__(self):
"""Iterator over the hits."""
return self.hits
@property
def total(self):
"""Get total number of hits."""
if lt_es7:
return self._results.hits.total
else:
return self._results.hits.total["value"]
@property
def aggregations(self):
"""Get the search result aggregations."""
return self._results.aggregations.to_dict()
@property
def hits(self):
"""Iterator over the hits."""
links = LinksFactory(host=_current_host, config=self._links_config)
for hit in self._results:
# Load dump
record = self._service.record_cls.loads(hit.to_dict())
# Project the record
projection = self._service.schema.dump(
self._identity,
record,
pid=record.pid,
record=record,
links_namespace="record",
links_factory=links
)
yield projection
@property
def pagination(self):
"""Create a pagination object."""
return Pagination(
self._params['size'],
self._params['page'],
self.total,
)
@property
def links(self):
"""Get the search result links.
TODO: Would be nicer if this were a parallel of data above.
"""
links = LinksFactory(host=_current_host, config=self._links_config)
schema = self._service.schema_search_links
data = schema.dump(
self._identity,
# It ain't pretty but it will do
{**self._params, "_pagination": self.pagination},
links_factory=links,
links_namespace="search",
)
return data.get("links")
def to_dict(self):
"""Return result as a dictionary."""
res = {
"hits": {
"hits": list(self.hits),
"total": self.total,
},
"links": self.links,
"sortBy": self._params["sort"],
"aggregations": self.aggregations,
}
if res['links'] is None:
del res['links']
return res
| en | 0.754577 | # -*- coding: utf-8 -*- # # Copyright (C) 2020 CERN. # Copyright (C) 2020 Northwestern University. # # Invenio-Records-Resources is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see LICENSE file for more # details. Service results. Function used to provide the current hostname to the link store. Single record result. Constructor. Get the record id. Key a key from the data. Property to get the record. Get a dictionary for the record. List of records result. Constructor. :params service: a service instance :params identity: an identity that performed the service request :params results: the search results :params params: dictionary of the query parameters :params links_config: a links store config Return the total numer of hits. Iterator over the hits. Get total number of hits. Get the search result aggregations. Iterator over the hits. # Load dump # Project the record Create a pagination object. Get the search result links. TODO: Would be nicer if this were a parallel of data above. # It ain't pretty but it will do Return result as a dictionary. | 2.061808 | 2 |
abstar/utils/cigar.py | briney/abstar | 30 | 6630380 | #!/usr/bin/env python
# filename: cigar.py
#
# Copyright (c) 2020 <NAME>
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
def make_cigar(germline_segment):
cigar = ''
if germline_segment.query_start > 0:
cigar += '{}S'.format(germline_segment.query_start)
if germline_segment.germline_start > 0:
cigar += '{}N'.format(germline_segment.germline_start)
cigar += make_alignment_cigar(germline_segment.realignment.aligned_query,
germline_segment.realignment.aligned_target)
return cigar
def get_cigar_code(q, t):
if q == '-':
return 'D'
if t == '-':
return 'I'
return 'M'
def make_alignment_cigar(query, target):
prev = get_cigar_code(query[0], target[0])
    count = 0  # start the first run at 0; the run-closing writes below each add one
cigar = ''
for q, t in zip(query[1:], target[1:]):
curr = get_cigar_code(q, t)
if prev is None:
prev = curr
elif curr == prev:
count += 1
else:
count += 1
cigar += '{}{}'.format(count, prev)
prev = curr
count = 0
cigar += '{}{}'.format(count + 1, prev)
return cigar
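# --- Worked example (illustrative, not from the original package) ---
# With aligned_query='AC-GT' and aligned_target='ACTGT' the per-position codes
# are M M D M M, so make_alignment_cigar('AC-GT', 'ACTGT') yields '2M1D2M'
# (run counting starts at zero, as noted above). Guarded demo:
if __name__ == "__main__":  # pragma: no cover
    print(make_alignment_cigar('AC-GT', 'ACTGT'))  # -> 2M1D2M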
| #!/usr/bin/env python
# filename: cigar.py
#
# Copyright (c) 2020 <NAME>
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
def make_cigar(germline_segment):
cigar = ''
if germline_segment.query_start > 0:
cigar += '{}S'.format(germline_segment.query_start)
if germline_segment.germline_start > 0:
cigar += '{}N'.format(germline_segment.germline_start)
cigar += make_alignment_cigar(germline_segment.realignment.aligned_query,
germline_segment.realignment.aligned_target)
return cigar
def get_cigar_code(q, t):
if q == '-':
return 'D'
if t == '-':
return 'I'
return 'M'
def make_alignment_cigar(query, target):
prev = get_cigar_code(query[0], target[0])
    count = 0  # start the first run at 0; the run-closing writes below each add one
cigar = ''
for q, t in zip(query[1:], target[1:]):
curr = get_cigar_code(q, t)
if prev is None:
prev = curr
elif curr == prev:
count += 1
else:
count += 1
cigar += '{}{}'.format(count, prev)
prev = curr
count = 0
cigar += '{}{}'.format(count + 1, prev)
return cigar
| en | 0.745772 | #!/usr/bin/env python # filename: cigar.py # # Copyright (c) 2020 <NAME> # License: The MIT license (http://opensource.org/licenses/MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software # and associated documentation files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, publish, distribute, # sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING # BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # | 2.356925 | 2 |
weather/views.py | swiftlabUAS/SwiftUTM | 0 | 6630381 | <filename>weather/views.py
# from django.template.response import TemplateResponse
from django.shortcuts import render
from django.views.generic.base import TemplateView
from .models import Reading
def weather(request):
data = Reading.objects.last()
# TODO: Weather: Does this mean that you only get what the last user saved?
return render(request,'weather/weather.html',{'data':data})
class WeatherView(TemplateView):
template_name = "weather/weather-js.html"
# def get_context_data(self, **kwargs):
# context = super(HomePageView, self).get_context_data(**kwargs)
# context['latest_articles'] = Article.objects.all()[:5]
# return context
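# --- Hypothetical URLconf hookup (illustration only; route names/paths are
# not part of this app and may differ in the real project) ---
# from django.urls import path
# urlpatterns = [
#     path('weather/', weather, name='weather'),
#     path('weather-js/', WeatherView.as_view(), name='weather-js'),
# ]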
| <filename>weather/views.py
# from django.template.response import TemplateResponse
from django.shortcuts import render
from django.views.generic.base import TemplateView
from .models import Reading
def weather(request):
data = Reading.objects.last()
# TODO: Weather: Does this mean that you only get what the last user saved?
return render(request,'weather/weather.html',{'data':data})
class WeatherView(TemplateView):
template_name = "weather/weather-js.html"
# def get_context_data(self, **kwargs):
# context = super(HomePageView, self).get_context_data(**kwargs)
# context['latest_articles'] = Article.objects.all()[:5]
# return context
| en | 0.481566 | # from django.template.response import TemplateResponse # TODO: Weather: Does this mean that you only get what the last user saved? # def get_context_data(self, **kwargs): # context = super(HomePageView, self).get_context_data(**kwargs) # context['latest_articles'] = Article.objects.all()[:5] # return context | 2.365383 | 2 |
benchml/accumulator.py | rudolfspetrovs/benchml | 3 | 6630382 | <reponame>rudolfspetrovs/benchml<filename>benchml/accumulator.py<gh_stars>1-10
import json
import numpy as np
import scipy.stats
try:
import sklearn.metrics
except ImportError:
pass
def metric_mse(yp, yt):
return np.sum((yp - yt) ** 2) / yp.shape[0]
def metric_rmse(yp, yt):
return metric_mse(yp, yt) ** 0.5
def metric_mae(yp, yt):
return np.sum(np.abs(yp - yt)) / yp.shape[0]
def metric_rhop(yp, yt):
return scipy.stats.pearsonr(yp, yt)[0]
def metric_rhor(yp, yt):
return scipy.stats.spearmanr(yp, yt).correlation
def metric_auc(yp, yt):
return sklearn.metrics.roc_auc_score(yt, yp)
def metric_auc_ovr(yp, yt):
return sklearn.metrics.roc_auc_score(yt, yp, multi_class="ovr")
def metric_auc_ovo(yp, yt):
return sklearn.metrics.roc_auc_score(yt, yp, multi_class="ovo")
def metric_acc(yp, yt):
return 1.0 - np.sum(np.heaviside(np.abs(yp - yt) - 0.5, 0.0)) / len(yt)
def metric_mcc(yp, yt):
return sklearn.metrics.matthews_corrcoef(yt, yp)
def metric_prec(yp, yt):
return sklearn.metrics.precision_score(yt, yp)
def metric_rec(yp, yt):
return sklearn.metrics.recall_score(yt, yp)
def metric_r2(yp, yt):
return sklearn.metrics.r2_score(yt, yp)
def metric_sup(yp, yt):
return np.max(np.abs(yp - yt))
def metric_bal(yp, yt):
return 0.5 * metric_mae(yp, yt) + 0.25 * metric_rmse(yp, yt) + 0.25 * metric_sup(yp, yt)
class Accumulator(object):
eval_map = {
"mae": metric_mae,
"mse": metric_mse,
"rmse": metric_rmse,
"rhop": metric_rhop,
"rhor": metric_rhor,
"auc": metric_auc,
"auc_ovo": metric_auc_ovo,
"auc_ovr": metric_auc_ovr,
"acc": metric_acc,
"mcc": metric_mcc,
"rec": metric_rec,
"prec": metric_prec,
"r2": metric_r2,
"sup": metric_sup,
"bal": metric_bal,
}
select_best = {
"mae": "smallest",
"mse": "smallest",
"rmse": "smallest",
"rhop": "largest",
"rhor": "largest",
"auc": "largest",
"auc_ovo": "largest",
"auc_ovr": "largest",
"acc": "largest",
"mcc": "largest",
"rec": "largest",
"prec": "largest",
"r2": "largest",
"sup": "smallest",
"bal": "smallest",
}
@classmethod
def select(cls, metric):
return cls.select_best[metric]
def score(self, metric, *args, **kwargs):
return self.eval_map[metric](*args, **kwargs)
def __init__(self, jsonfile=None, metric=None, metrics=None):
self.yp_map = {}
self.yt_map = {}
self.metric = metric
self.metrics = metrics
if jsonfile is not None:
self.load(jsonfile)
return
def __getitem__(self, key):
return np.array(self.yp_map[key]), np.array(self.yt_map[key])
def append(self, channel, yp, yt):
if channel not in self.yp_map:
self.yp_map[channel] = []
self.yt_map[channel] = []
self.yp_map[channel] = self.yp_map[channel] + list(yp)
self.yt_map[channel] = self.yt_map[channel] + list(yt)
return
def evaluate(self, channel, metric=None, bootstrap=0):
if metric is None:
metric = self.metric
if len(self.yp_map[channel]) < 1:
return np.nan
if bootstrap == 0:
return (
Accumulator.eval_map[metric](
np.array(self.yp_map[channel]), np.array(self.yt_map[channel])
),
0.0,
)
else:
v = []
n = len(self.yp_map[channel])
yp = np.array(self.yp_map[channel])
yt = np.array(self.yt_map[channel])
for r in range(bootstrap):
re = np.random.randint(0, n, size=(n,))
v.append(Accumulator.eval_map[metric](yp[re], yt[re]))
return np.mean(v), np.std(v)
def evaluateNull(self, channel, metric, n_samples):
if len(self.yp_map[channel]) < 1:
return np.nan
z = []
for i in range(n_samples):
yp_null = np.array(self.yp_map[channel])
yt_null = np.array(self.yt_map[channel])
np.random.shuffle(yp_null)
z.append(Accumulator.eval_map[metric](yp_null, yt_null))
z = np.sort(np.array(z))
return z
def evaluateAll(self, metrics=None, bootstrap=0, log=None, match=None):
if metrics is None:
metrics = self.metrics
res = {}
if match is None:
channels_iter = sorted(self.yp_map)
else:
channels_iter = filter(lambda cd: cd.startswith(match), sorted(self.yp_map))
for channel in channels_iter:
res[channel] = {}
metric_logs = []
for metric in metrics:
v, dv = self.evaluate(channel, metric, bootstrap=bootstrap)
res[channel][metric] = v
res[channel][metric + "_std"] = dv
if log:
metric_logs.append((metric, v, dv))
if log:
(
log
<< " %-14s "
% (
str(channel)[0:6] + ".." + str(channel)[-6:]
if len(str(channel)) > 14
else str(channel)
)
<< log.flush
)
for metric, v, dv in metric_logs:
log << "%s=%+1.4e +- %+1.4e" % (metric, v, dv) << log.flush
log << log.endl
return res
def save(self, jsonfile):
json.dump(
{"yp_map": self.yp_map, "yt_map": self.yt_map},
open(jsonfile, "w"),
indent=1,
sort_keys=True,
)
return
def load(self, jsonfile):
data = json.load(open(jsonfile))
self.yp_map = data["yp_map"]
self.yt_map = data["yt_map"]
return
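# --- Usage sketch (illustrative, not part of the original module); guarded so
# importing benchml.accumulator stays side-effect free. Only numpy-backed
# metrics are used here, so sklearn is not required.
if __name__ == "__main__":  # pragma: no cover
    _acc = Accumulator(metric="mae", metrics=["mae", "rmse"])
    _acc.append("test", yp=[0.1, 0.9], yt=[0.0, 1.0])
    print(_acc.evaluate("test", "mae"))               # -> (0.1, 0.0); std is 0 without bootstrap
    print(_acc.evaluateAll(metrics=["mae", "rmse"]))  # per-channel metric dictionary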
| import json
import numpy as np
import scipy.stats
try:
import sklearn.metrics
except ImportError:
pass
def metric_mse(yp, yt):
return np.sum((yp - yt) ** 2) / yp.shape[0]
def metric_rmse(yp, yt):
return metric_mse(yp, yt) ** 0.5
def metric_mae(yp, yt):
return np.sum(np.abs(yp - yt)) / yp.shape[0]
def metric_rhop(yp, yt):
return scipy.stats.pearsonr(yp, yt)[0]
def metric_rhor(yp, yt):
return scipy.stats.spearmanr(yp, yt).correlation
def metric_auc(yp, yt):
return sklearn.metrics.roc_auc_score(yt, yp)
def metric_auc_ovr(yp, yt):
return sklearn.metrics.roc_auc_score(yt, yp, multi_class="ovr")
def metric_auc_ovo(yp, yt):
return sklearn.metrics.roc_auc_score(yt, yp, multi_class="ovo")
def metric_acc(yp, yt):
return 1.0 - np.sum(np.heaviside(np.abs(yp - yt) - 0.5, 0.0)) / len(yt)
def metric_mcc(yp, yt):
return sklearn.metrics.matthews_corrcoef(yt, yp)
def metric_prec(yp, yt):
return sklearn.metrics.precision_score(yt, yp)
def metric_rec(yp, yt):
return sklearn.metrics.recall_score(yt, yp)
def metric_r2(yp, yt):
return sklearn.metrics.r2_score(yt, yp)
def metric_sup(yp, yt):
return np.max(np.abs(yp - yt))
def metric_bal(yp, yt):
return 0.5 * metric_mae(yp, yt) + 0.25 * metric_rmse(yp, yt) + 0.25 * metric_sup(yp, yt)
class Accumulator(object):
eval_map = {
"mae": metric_mae,
"mse": metric_mse,
"rmse": metric_rmse,
"rhop": metric_rhop,
"rhor": metric_rhor,
"auc": metric_auc,
"auc_ovo": metric_auc_ovo,
"auc_ovr": metric_auc_ovr,
"acc": metric_acc,
"mcc": metric_mcc,
"rec": metric_rec,
"prec": metric_prec,
"r2": metric_r2,
"sup": metric_sup,
"bal": metric_bal,
}
select_best = {
"mae": "smallest",
"mse": "smallest",
"rmse": "smallest",
"rhop": "largest",
"rhor": "largest",
"auc": "largest",
"auc_ovo": "largest",
"auc_ovr": "largest",
"acc": "largest",
"mcc": "largest",
"rec": "largest",
"prec": "largest",
"r2": "largest",
"sup": "smallest",
"bal": "smallest",
}
@classmethod
def select(cls, metric):
return cls.select_best[metric]
def score(self, metric, *args, **kwargs):
return self.eval_map[metric](*args, **kwargs)
def __init__(self, jsonfile=None, metric=None, metrics=None):
self.yp_map = {}
self.yt_map = {}
self.metric = metric
self.metrics = metrics
if jsonfile is not None:
self.load(jsonfile)
return
def __getitem__(self, key):
return np.array(self.yp_map[key]), np.array(self.yt_map[key])
def append(self, channel, yp, yt):
if channel not in self.yp_map:
self.yp_map[channel] = []
self.yt_map[channel] = []
self.yp_map[channel] = self.yp_map[channel] + list(yp)
self.yt_map[channel] = self.yt_map[channel] + list(yt)
return
def evaluate(self, channel, metric=None, bootstrap=0):
if metric is None:
metric = self.metric
if len(self.yp_map[channel]) < 1:
return np.nan
if bootstrap == 0:
return (
Accumulator.eval_map[metric](
np.array(self.yp_map[channel]), np.array(self.yt_map[channel])
),
0.0,
)
else:
v = []
n = len(self.yp_map[channel])
yp = np.array(self.yp_map[channel])
yt = np.array(self.yt_map[channel])
for r in range(bootstrap):
re = np.random.randint(0, n, size=(n,))
v.append(Accumulator.eval_map[metric](yp[re], yt[re]))
return np.mean(v), np.std(v)
def evaluateNull(self, channel, metric, n_samples):
if len(self.yp_map[channel]) < 1:
return np.nan
z = []
for i in range(n_samples):
yp_null = np.array(self.yp_map[channel])
yt_null = np.array(self.yt_map[channel])
np.random.shuffle(yp_null)
z.append(Accumulator.eval_map[metric](yp_null, yt_null))
z = np.sort(np.array(z))
return z
def evaluateAll(self, metrics=None, bootstrap=0, log=None, match=None):
if metrics is None:
metrics = self.metrics
res = {}
if match is None:
channels_iter = sorted(self.yp_map)
else:
channels_iter = filter(lambda cd: cd.startswith(match), sorted(self.yp_map))
for channel in channels_iter:
res[channel] = {}
metric_logs = []
for metric in metrics:
v, dv = self.evaluate(channel, metric, bootstrap=bootstrap)
res[channel][metric] = v
res[channel][metric + "_std"] = dv
if log:
metric_logs.append((metric, v, dv))
if log:
(
log
<< " %-14s "
% (
str(channel)[0:6] + ".." + str(channel)[-6:]
if len(str(channel)) > 14
else str(channel)
)
<< log.flush
)
for metric, v, dv in metric_logs:
log << "%s=%+1.4e +- %+1.4e" % (metric, v, dv) << log.flush
log << log.endl
return res
def save(self, jsonfile):
json.dump(
{"yp_map": self.yp_map, "yt_map": self.yt_map},
open(jsonfile, "w"),
indent=1,
sort_keys=True,
)
return
def load(self, jsonfile):
data = json.load(open(jsonfile))
self.yp_map = data["yp_map"]
self.yt_map = data["yt_map"]
return | none | 1 | 2.328197 | 2 |
|
app/modules/util/util.py | anderskswanson/xtensible | 1 | 6630383 | <reponame>anderskswanson/xtensible
from platform import platform
def osversion():
"""
    Return the platform string for the OS this module is running on
Usage: !util osversion
"""
return platform()
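# --- Guarded demo (illustrative; actual output depends on the host OS) ---
if __name__ == "__main__":
    print(osversion())  # e.g. 'Linux-5.15.0-91-generic-x86_64-with-glibc2.35'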
| from platform import platform
def osversion():
"""
    Return the platform string for the OS this module is running on
Usage: !util osversion
"""
return platform() | en | 0.733475 | print the current os this module is running on Usage: !util osversion | 2.66023 | 3 |
setup.py | mgaitan/fortran_magic | 102 | 6630384 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from setuptools import setup
long_description = (open('README.rst').read() + '\n\n' +
open('CHANGES.rst').read())
setup(
name='fortran-magic',
version='0.6.1',
    description='An extension for IPython that helps to use Fortran in '
'your interactive session.',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mgaitan/fortran_magic',
license='BSD',
keywords="ipython notebook fortran f2py science",
py_modules=['fortranmagic'],
install_requires=['ipython', 'numpy'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: IPython',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Fortran',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering'
],
)
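# --- Typical install/usage sketch (illustrative, not part of setup.py) ---
#   pip install fortran-magic
#   # then inside IPython / Jupyter:
#   #   %load_ext fortranmagic
#   #   %%fortran          (cell magic that builds the cell with f2py)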
| # -*- coding: utf-8 -*-
from setuptools import setup
long_description = (open('README.rst').read() + '\n\n' +
open('CHANGES.rst').read())
setup(
name='fortran-magic',
version='0.6.1',
    description='An extension for IPython that helps to use Fortran in '
'your interactive session.',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mgaitan/fortran_magic',
license='BSD',
keywords="ipython notebook fortran f2py science",
py_modules=['fortranmagic'],
install_requires=['ipython', 'numpy'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: IPython',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Fortran',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering'
],
) | en | 0.769321 | # -*- coding: utf-8 -*- | 1.064155 | 1 |
accounts/migrations/0005_auto_20201121_1422.py | jordandelaney/microblog | 0 | 6630385 | # Generated by Django 3.1.3 on 2020-11-21 19:22
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20201112_1559'),
]
operations = [
migrations.AlterModelManagers(
name='customuser',
managers=[
],
),
migrations.AlterField(
model_name='customuser',
name='date_joined',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='customuser',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='customuser',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='customuser',
name='is_staff',
field=models.BooleanField(default=False),
),
]
| # Generated by Django 3.1.3 on 2020-11-21 19:22
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20201112_1559'),
]
operations = [
migrations.AlterModelManagers(
name='customuser',
managers=[
],
),
migrations.AlterField(
model_name='customuser',
name='date_joined',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='customuser',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='customuser',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='customuser',
name='is_staff',
field=models.BooleanField(default=False),
),
]
| en | 0.819721 | # Generated by Django 3.1.3 on 2020-11-21 19:22 | 1.719629 | 2 |
skyportal/tests/api/test_standards.py | bparazin/skyportal | 52 | 6630386 | <gh_stars>10-100
from skyportal.tests import api
def test_standards(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range_str': None,
'ra_filter_range_str': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 200
assert data['status'] == 'success'
assert isinstance(data['data']['starlist_info'], list)
assert len(data['data']['starlist_info']) > 0
# make sure we've got an HD source in here
assert any([x["str"].find("HD") != -1 for x in data['data']['starlist_info']])
def test_standards_bad_standard_list(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "SpaceX",
'dec_filter_range': None,
'ra_filter_range': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 400
assert data['message'].find('Invalid') != -1
def test_standards_bad_range(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': None,
'ra_filter_range': "(-45, 60)",
'show_first_line': True,
},
token=view_only_token,
)
assert status == 400
assert data['message'].find('Elements out of range') != -1
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': "(10, 100)",
'ra_filter_range': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 400
assert data['message'].find('Elements out of range') != -1
def test_standards_filter(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': None,
'ra_filter_range': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 200
assert data['status'] == 'success'
full_list = data['data']['starlist_info']
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': "(-90, 0)",
'ra_filter_range': "(0, 60)",
'show_first_line': True,
},
token=view_only_token,
)
assert status == 200
assert data['status'] == 'success'
filter_list = data['data']['starlist_info']
assert len(filter_list) < len(full_list)
| from skyportal.tests import api
def test_standards(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range_str': None,
'ra_filter_range_str': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 200
assert data['status'] == 'success'
assert isinstance(data['data']['starlist_info'], list)
assert len(data['data']['starlist_info']) > 0
# make sure we've got an HD source in here
assert any([x["str"].find("HD") != -1 for x in data['data']['starlist_info']])
def test_standards_bad_standard_list(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "SpaceX",
'dec_filter_range': None,
'ra_filter_range': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 400
assert data['message'].find('Invalid') != -1
def test_standards_bad_range(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': None,
'ra_filter_range': "(-45, 60)",
'show_first_line': True,
},
token=view_only_token,
)
assert status == 400
assert data['message'].find('Elements out of range') != -1
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': "(10, 100)",
'ra_filter_range': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 400
assert data['message'].find('Elements out of range') != -1
def test_standards_filter(view_only_token):
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': None,
'ra_filter_range': None,
'show_first_line': True,
},
token=view_only_token,
)
assert status == 200
assert data['status'] == 'success'
full_list = data['data']['starlist_info']
status, data = api(
'GET',
'internal/standards',
params={
'facility': "Keck",
'standard_type': "ESO",
'dec_filter_range': "(-90, 0)",
'ra_filter_range': "(0, 60)",
'show_first_line': True,
},
token=view_only_token,
)
assert status == 200
assert data['status'] == 'success'
filter_list = data['data']['starlist_info']
assert len(filter_list) < len(full_list) | en | 0.964192 | # make sure we've got an HD source in here | 2.408425 | 2 |
devicewipefromasset.py | jamescapen/api-scripts | 0 | 6630387 | #!/opt/mvsd/bin/python3
## this script will take asset tags in DeviceWipeAsset.csv and, using an API call
## to Snipe, retrieve the serial numbers for those devices and put them in a new
## CSV, DeviceWipeSerial.csv then it takes the serial numbers from
## DeviceWipeSerial.csv and issues an API command to WS1 (Airwatch) to wipe
## those devices then outputs the wiped device serial numbers to a file named
## %Y%m%d-%H%M%S_WipedDevices.csv
## no checks are in place, use at your own risk
## make sure you have the following modules installed, then fill in your
## authentication information for bearer token, airwatch tenant code, and airwatch basic auth
import csv
from datetime import datetime
import requests
import credentials
SNIPEURL = "https://snipe.domain.com/api/v1/hardware/bytag/"
WS1URL = "https://awconsole.domain.com/api/mdm/devices/commands?command=DeviceWipe&reason="
SNIPEAPIHEADERS = credentials.snipeAPI
WS1HEADERS = credentials.ws1API
CSVHEADERS = [ 'serialnumber' ]
def get_today():
return datetime.now().strftime("%Y%m%d-%H%M%S")
filename = "%s_%s.%s" % (get_today() , "WipedDevices","csv")
with open('DeviceWipeAsset.csv', 'r') as infile, open("DeviceWipeSerial.csv", 'w') as outfile:
csv_writer = csv.writer(outfile)
csv_reader = csv.DictReader(infile, delimiter=',')
csv_writer.writerow(CSVHEADERS)
for lines in csv_reader:
response = requests.get(SNIPEURL+lines['asset'], headers=SNIPEAPIHEADERS).json()
csv_writer.writerow([response['serial']])
with open('DeviceWipeSerial.csv', 'r') as infile, open (filename, 'w') as outfile:
csv_writer = csv.writer(outfile)
csv_reader = csv.DictReader(infile, delimiter=',')
csv_writer.writerow(CSVHEADERS)
for lines in csv_reader:
response = requests.post(WS1URL+'mdm migration'+'&searchBy=Serialnumber&id='+lines['serialnumber'], headers=WS1HEADERS)
if response.status_code == 202:
csv_writer.writerow([lines['serialnumber'], 'OK'])
else:
csv_writer.writerow([lines['serialnumber'], 'X'])
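## --- Hypothetical credentials.py layout (illustration only; the attribute names
## --- snipeAPI/ws1API come from the imports above, the header keys are guesses
## --- based on the bearer-token / tenant-code / basic-auth comments) ---
## snipeAPI = {"Authorization": "Bearer <snipe-api-token>", "Accept": "application/json"}
## ws1API = {"Authorization": "Basic <base64-user:pass>", "aw-tenant-code": "<tenant-code>",
##           "Accept": "application/json", "Content-Type": "application/json"}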
| #!/opt/mvsd/bin/python3
## this script will take asset tags in DeviceWipeAsset.csv and, using an API call
## to Snipe, retrieve the serial numbers for those devices and put them in a new
## CSV, DeviceWipeSerial.csv then it takes the serial numbers from
## DeviceWipeSerial.csv and issues an API command to WS1 (Airwatch) to wipe
## those devices then outputs the wiped device serial numbers to a file named
## %Y%m%d-%H%M%S_WipedDevices.csv
## no checks are in place, use at your own risk
## make sure you have the following modules installed, then fill in your
## authentication information for bearer token, airwatch tenant code, and airwatch basic auth
import csv
from datetime import datetime
import requests
import credentials
SNIPEURL = "https://snipe.domain.com/api/v1/hardware/bytag/"
WS1URL = "https://awconsole.domain.com/api/mdm/devices/commands?command=DeviceWipe&reason="
SNIPEAPIHEADERS = credentials.snipeAPI
WS1HEADERS = credentials.ws1API
CSVHEADERS = [ 'serialnumber' ]
def get_today():
return datetime.now().strftime("%Y%m%d-%H%M%S")
filename = "%s_%s.%s" % (get_today() , "WipedDevices","csv")
with open('DeviceWipeAsset.csv', 'r') as infile, open("DeviceWipeSerial.csv", 'w') as outfile:
csv_writer = csv.writer(outfile)
csv_reader = csv.DictReader(infile, delimiter=',')
csv_writer.writerow(CSVHEADERS)
for lines in csv_reader:
response = requests.get(SNIPEURL+lines['asset'], headers=SNIPEAPIHEADERS).json()
csv_writer.writerow([response['serial']])
with open('DeviceWipeSerial.csv', 'r') as infile, open (filename, 'w') as outfile:
csv_writer = csv.writer(outfile)
csv_reader = csv.DictReader(infile, delimiter=',')
csv_writer.writerow(CSVHEADERS)
for lines in csv_reader:
response = requests.post(WS1URL+'mdm migration'+'&searchBy=Serialnumber&id='+lines['serialnumber'], headers=WS1HEADERS)
if response.status_code == 202:
csv_writer.writerow([lines['serialnumber'], 'OK'])
else:
csv_writer.writerow([lines['serialnumber'], 'X'])
| en | 0.745679 | #!/opt/mvsd/bin/python3 ## this script will take asset tags in DeviceWipeAsset.csv and using an API call ## to Snipe retrieve the serial numbers for those devices and put them in a new ## CSV, DeviceWipeSerial.csv then it takes the serial numbers from ## DeviceWipeSerial.csv and issues an API command to WS1 (Airwatch) to wipe ## those devices then outputs the wiped device serial numbers to a file named ## %Y%m%d-%H%M%S_WipedDevices.csv ## no checks are in place, use at your own risk ## make sure you have the follow modules installed, then fill in your ## authentication information for bearer token, airwatch tenant code, and airwatch basic auth | 2.69272 | 3 |
roster_scraper.py | football61/247-scraper | 0 | 6630388 | <reponame>football61/247-scraper<filename>roster_scraper.py<gh_stars>0
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
import csv
from selenium.webdriver.chrome.options import Options
from random import uniform
from itertools import zip_longest
from bs4 import BeautifulSoup
import requests
import pandas as pd
from lxml import html
import re
import datetime
driver = webdriver.Chrome("chromedriver.exe")
df = pd.read_csv('team_roster_urls.csv', sep='\t')
teams = len(df) + 1
print(teams)
print('teams')
commits = [df.team_url.tolist()]
print(commits)
print('commits')
csv_columns = ['url', 'name', 'jersey', 'position', 'height', 'weight', 'year', 'age', 'hs', 'rating', 'team']
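# --- Expected input layout for team_roster_urls.csv (inferred from the
# pd.read_csv(..., sep='\t') call and the df.team_url column above; the URL
# below is hypothetical) ---
# team_url
# https://example.247sports.com/season/2020-football/roster/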
data_dict = []
for teams in commits:
print(teams[0])
for i in range(1,len(teams)):
driver.get(teams[i])
for row in range(1,200):
try:
url = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').get_attribute('href').lower()
name = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').text.lower()
jersey = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[1]').text.lower()
position = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[2]').text.lower()
height = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[3]').text.lower()
weight = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[4]').text.lower()
year = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[5]').text.lower()
age = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[6]').text.lower()
hs = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[7]').text.lower()
rating = driver.find_element_by_css_selector('tr:nth-child(' + str(row) + ') > td:nth-child(8) > span.rating').text.lower()
data = {'url': url, 'name': name, 'jersey':jersey, 'position': position, 'height':height,
'weight': weight, 'year': year, 'age':age, 'hs': hs, 'rating':rating, 'team':teams[i]}
data_dict.append(data)
except NoSuchElementException:
try:
url = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').get_attribute('href').lower()
name = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').text.lower()
jersey = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[1]').text.lower()
position = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[2]').text.lower()
height = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[3]').text.lower()
weight = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[4]').text.lower()
year = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[5]').text.lower()
age = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[6]').text.lower()
hs = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[7]').text.lower()
rating = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[8]').text.lower()
data = {'url': url, 'name': name, 'jersey':jersey, 'position': position, 'height':height,
'weight': weight, 'year': year, 'age':age, 'hs': hs, 'rating':rating, 'team':teams[i]}
data_dict.append(data)
except NoSuchElementException:
continue
csv_file = "team_roster_teams.csv"
try:
with open(csv_file, 'w',newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns, delimiter='\t')
writer.writeheader()
for data in data_dict:
writer.writerow(data)
except IOError:
print("I/O error")
print(data_dict)
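The row loop above repeats the same ten lookups in both the try and the except branch only to swap the rating selector. A hypothetical helper (not part of the original script, reusing the imports above) could express one row extraction like this:

# Hypothetical refactor sketch, not in the original script: one roster row,
# with the CSS rating selector first and the XPath cell as fallback.
CELL = '//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[{row}]/td[{col}]'
LINK = '//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[{row}]/td/a'

def scrape_row(driver, row, team_url):
    cell = lambda col: driver.find_element_by_xpath(CELL.format(row=row, col=col)).text.lower()
    link = driver.find_element_by_xpath(LINK.format(row=row))
    try:
        rating = driver.find_element_by_css_selector(
            'tr:nth-child({}) > td:nth-child(8) > span.rating'.format(row)).text.lower()
    except NoSuchElementException:
        rating = cell(8)
    return {'url': link.get_attribute('href').lower(), 'name': link.text.lower(),
            'jersey': cell(1), 'position': cell(2), 'height': cell(3), 'weight': cell(4),
            'year': cell(5), 'age': cell(6), 'hs': cell(7), 'rating': rating, 'team': team_url}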
| from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
import csv
from selenium.webdriver.chrome.options import Options
from random import uniform
from itertools import zip_longest
from bs4 import BeautifulSoup
import requests
import pandas as pd
from lxml import html
import re
import datetime
driver = webdriver.Chrome("chromedriver.exe")
df = pd.read_csv('team_roster_urls.csv', sep='\t')
teams = len(df) + 1
print(teams)
print('teams')
commits = [df.team_url.tolist()]
print(commits)
print('commits')
csv_columns = ['url', 'name', 'jersey', 'position', 'height', 'weight', 'year', 'age', 'hs', 'rating', 'team']
data_dict = []
for teams in commits:
print(teams[0])
for i in range(1,len(teams)):
driver.get(teams[i])
for row in range(1,200):
try:
url = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').get_attribute('href').lower()
name = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').text.lower()
jersey = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[1]').text.lower()
position = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[2]').text.lower()
height = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[3]').text.lower()
weight = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[4]').text.lower()
year = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[5]').text.lower()
age = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[6]').text.lower()
hs = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[7]').text.lower()
rating = driver.find_element_by_css_selector('tr:nth-child(' + str(row) + ') > td:nth-child(8) > span.rating').text.lower()
data = {'url': url, 'name': name, 'jersey':jersey, 'position': position, 'height':height,
'weight': weight, 'year': year, 'age':age, 'hs': hs, 'rating':rating, 'team':teams[i]}
data_dict.append(data)
except NoSuchElementException:
try:
url = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').get_attribute('href').lower()
name = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/table/tbody/tr[' + str(row) + ']/td/a').text.lower()
jersey = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[1]').text.lower()
position = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[2]').text.lower()
height = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[3]').text.lower()
weight = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[4]').text.lower()
year = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[5]').text.lower()
age = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[6]').text.lower()
hs = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[7]').text.lower()
rating = driver.find_element_by_xpath('//*[@id="page-content"]/div/section[2]/div/section/div/div/table/tbody/tr[' + str(row) + ']/td[8]').text.lower()
data = {'url': url, 'name': name, 'jersey':jersey, 'position': position, 'height':height,
'weight': weight, 'year': year, 'age':age, 'hs': hs, 'rating':rating, 'team':teams[i]}
data_dict.append(data)
except NoSuchElementException:
continue
csv_file = "team_roster_teams.csv"
try:
with open(csv_file, 'w',newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns, delimiter='\t')
writer.writeheader()
for data in data_dict:
writer.writerow(data)
except IOError:
print("I/O error")
print(data_dict) | none | 1 | 3.092255 | 3 |
|
python/pyspark/pandas/tests/plot/test_frame_plot.py | wangyeweikuer/spark | 1 | 6630389 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.pandas.plot import TopNPlotBase, SampledPlotBase, HistogramPlotBase, BoxPlotBase
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DataFramePlotTest(PandasOnSparkTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
def test_missing(self):
psdf = ps.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
unsupported_functions = ["hexbin"]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*not implemented".format(name)
):
getattr(psdf.plot, name)()
def test_topn_max_rows(self):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = TopNPlotBase().get_top_n(psdf)
self.assertEqual(len(data), 2000)
def test_sampled_plot_with_ratio(self):
with option_context("plotting.sample_ratio", 0.5):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2500, 1), 0.5)
def test_sampled_plot_with_max_rows(self):
# 'plotting.max_rows' is 2000
pdf = pd.DataFrame(np.random.rand(2000, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2000, 1), 1)
def test_compute_hist_single_column(self):
psdf = ps.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
expected_bins = np.linspace(1, 50, 11)
bins = HistogramPlotBase.get_bins(psdf[["a"]].to_spark(), 10)
expected_histogram = np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1])
histogram = HistogramPlotBase.compute_hist(psdf[["a"]], bins)[0]
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
self.assert_eq(pd.Series(expected_histogram, name="a"), histogram, almost=True)
def test_compute_hist_multi_columns(self):
expected_bins = np.linspace(1, 50, 11)
psdf = ps.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
"b": [50, 50, 30, 30, 30, 24, 10, 5, 4, 3, 1],
}
)
bins = HistogramPlotBase.get_bins(psdf.to_spark(), 10)
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
expected_histograms = [
np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1]),
np.array([4, 1, 0, 0, 1, 3, 0, 0, 0, 2]),
]
histograms = HistogramPlotBase.compute_hist(psdf, bins)
expected_names = ["a", "b"]
for histogram, expected_histogram, expected_name in zip(
histograms, expected_histograms, expected_names
):
self.assert_eq(
pd.Series(expected_histogram, name=expected_name), histogram, almost=True
)
def test_compute_box_multi_columns(self):
# compare compute_multicol_stats with compute_stats
def check_box_multi_columns(psdf):
k = 1.5
multicol_stats = BoxPlotBase.compute_multicol_stats(
psdf, ["a", "b", "c"], whis=k, precision=0.01
)
multicol_outliers = BoxPlotBase.multicol_outliers(psdf, multicol_stats)
multicol_whiskers = BoxPlotBase.calc_multicol_whiskers(
["a", "b", "c"], multicol_outliers
)
for col in ["a", "b", "c"]:
col_stats = multicol_stats[col]
col_whiskers = multicol_whiskers[col]
stats, fences = BoxPlotBase.compute_stats(psdf[col], col, whis=k, precision=0.01)
outliers = BoxPlotBase.outliers(psdf[col], col, *fences)
whiskers = BoxPlotBase.calc_whiskers(col, outliers)
self.assertEqual(stats["mean"], col_stats["mean"])
self.assertEqual(stats["med"], col_stats["med"])
self.assertEqual(stats["q1"], col_stats["q1"])
self.assertEqual(stats["q3"], col_stats["q3"])
self.assertEqual(fences[0], col_stats["lfence"])
self.assertEqual(fences[1], col_stats["ufence"])
self.assertEqual(whiskers[0], col_whiskers["min"])
self.assertEqual(whiskers[1], col_whiskers["max"])
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
"b": [3, 2, 5, 4, 5, 6, 8, 8, 11, 60, 90],
"c": [-30, -2, 5, 4, 5, 6, -8, 8, 11, 12, 18],
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10],
)
psdf = ps.from_pandas(pdf)
check_box_multi_columns(psdf)
check_box_multi_columns(-psdf)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.plot.test_frame_plot import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
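As a side note, the bin edges and counts asserted in the histogram tests above can be reproduced with plain NumPy on the same data, which is a quick way to sanity-check them:

# Illustrative cross-check only (not part of the test suite): plain NumPy
# produces the same bin edges and counts the tests assert for column "a".
import numpy as np
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]
counts, edges = np.histogram(data, bins=np.linspace(1, 50, 11))
print(counts)   # [5 4 1 0 0 0 0 0 0 1]
print(edges)    # 11 evenly spaced edges from 1 to 50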
| #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.pandas.plot import TopNPlotBase, SampledPlotBase, HistogramPlotBase, BoxPlotBase
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DataFramePlotTest(PandasOnSparkTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
def test_missing(self):
psdf = ps.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
unsupported_functions = ["hexbin"]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*not implemented".format(name)
):
getattr(psdf.plot, name)()
def test_topn_max_rows(self):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = TopNPlotBase().get_top_n(psdf)
self.assertEqual(len(data), 2000)
def test_sampled_plot_with_ratio(self):
with option_context("plotting.sample_ratio", 0.5):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2500, 1), 0.5)
def test_sampled_plot_with_max_rows(self):
# 'plotting.max_rows' is 2000
pdf = pd.DataFrame(np.random.rand(2000, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2000, 1), 1)
def test_compute_hist_single_column(self):
psdf = ps.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
expected_bins = np.linspace(1, 50, 11)
bins = HistogramPlotBase.get_bins(psdf[["a"]].to_spark(), 10)
expected_histogram = np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1])
histogram = HistogramPlotBase.compute_hist(psdf[["a"]], bins)[0]
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
self.assert_eq(pd.Series(expected_histogram, name="a"), histogram, almost=True)
def test_compute_hist_multi_columns(self):
expected_bins = np.linspace(1, 50, 11)
psdf = ps.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
"b": [50, 50, 30, 30, 30, 24, 10, 5, 4, 3, 1],
}
)
bins = HistogramPlotBase.get_bins(psdf.to_spark(), 10)
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
expected_histograms = [
np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1]),
np.array([4, 1, 0, 0, 1, 3, 0, 0, 0, 2]),
]
histograms = HistogramPlotBase.compute_hist(psdf, bins)
expected_names = ["a", "b"]
for histogram, expected_histogram, expected_name in zip(
histograms, expected_histograms, expected_names
):
self.assert_eq(
pd.Series(expected_histogram, name=expected_name), histogram, almost=True
)
def test_compute_box_multi_columns(self):
# compare compute_multicol_stats with compute_stats
def check_box_multi_columns(psdf):
k = 1.5
multicol_stats = BoxPlotBase.compute_multicol_stats(
psdf, ["a", "b", "c"], whis=k, precision=0.01
)
multicol_outliers = BoxPlotBase.multicol_outliers(psdf, multicol_stats)
multicol_whiskers = BoxPlotBase.calc_multicol_whiskers(
["a", "b", "c"], multicol_outliers
)
for col in ["a", "b", "c"]:
col_stats = multicol_stats[col]
col_whiskers = multicol_whiskers[col]
stats, fences = BoxPlotBase.compute_stats(psdf[col], col, whis=k, precision=0.01)
outliers = BoxPlotBase.outliers(psdf[col], col, *fences)
whiskers = BoxPlotBase.calc_whiskers(col, outliers)
self.assertEqual(stats["mean"], col_stats["mean"])
self.assertEqual(stats["med"], col_stats["med"])
self.assertEqual(stats["q1"], col_stats["q1"])
self.assertEqual(stats["q3"], col_stats["q3"])
self.assertEqual(fences[0], col_stats["lfence"])
self.assertEqual(fences[1], col_stats["ufence"])
self.assertEqual(whiskers[0], col_whiskers["min"])
self.assertEqual(whiskers[1], col_whiskers["max"])
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
"b": [3, 2, 5, 4, 5, 6, 8, 8, 11, 60, 90],
"c": [-30, -2, 5, 4, 5, 6, -8, 8, 11, 12, 18],
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10],
)
psdf = ps.from_pandas(pdf)
check_box_multi_columns(psdf)
check_box_multi_columns(-psdf)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.plot.test_frame_plot import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| en | 0.832947 | # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # 'plotting.max_rows' is 2000 # compare compute_multicol_stats with compute_stats # noqa: F401 # type: ignore[import] | 1.901068 | 2 |
main/main.py | chdb/DhammaMap1 | 0 | 6630390 | # coding: utf-8
"""
Initializes flask server and assigns all routes by importing modules
"""
import flask
#import config
import util
from model.config import Config # NB The model module needs to be imported *after* setting CURRENT_VERSION_TIMESTAMP,
# since model.ndbModelBase uses it as default value for version_r property
app = flask.Flask(__name__) # pylint: disable=invalid-name
# note: the Flask server doesn't need the DEBUG parameter while developing, since server restarting is taken care of by the GAE SDK
#SECRET_KEY = CONFIG_DB.flask_secret.encode('ascii')
#model.AuthProvider.init()
class Config(object):
DEVELOPMENT = util.DEVT
SECRET_KEY = util.randomB64()
CONFIG_DB = Config.get_master_db()
config = Config()
app.config.from_object(config)
util.debugDict(config,'my config ')
util.debugDict(app.config,'flask app config ')
app.jinja_env.line_statement_prefix = '#'
app.jinja_env.line_comment_prefix = '##'
import auth # pylint: disable=unused-import
import control.error
import control.index
import control.user
import model # pylint: disable=unused-import
import task # pylint: disable=unused-import
from api import helpers
API = helpers.Api(app)
import api.v1 # pylint: disable=unused-import
import logging
logging.debug('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ main @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#logging.debug('####################################################### app id: %r ' , config.APPLICATION_ID)
# logging.debug('####################################################### cur ver id: %r' , config.CURRENT_VERSION_ID)
# logging.debug('####################################################### cur ver name: %r' , config.CURRENT_VERSION_NAME)
# logging.debug('####################################################### cur ver timestamp: %r',config.CURRENT_VERSION_TIMESTAMP)
#logging.debug('####################################################### cur ver datetime: %r', config.CURRENT_VERSION_DATE)
# shorts = [i for i[0] in config.authProviders]
# longs = [i for i[1] in config.authProviders]
# assert len(shorts) == len(set(shorts)), 'no short duplicates'
# assert len(longs ) == len(set(longs )), 'no long duplicates'
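One detail worth spelling out about the config block above: Flask's config.from_object() copies only UPPERCASE attributes, which is why DEVELOPMENT and SECRET_KEY are written in caps. A standalone illustration (not from this repository):

# Standalone illustration, not part of this app: from_object() copies only
# uppercase attributes into app.config.
from flask import Flask

class DemoConfig(object):
    DEBUG = True            # copied
    SECRET_KEY = 'demo'     # copied
    helper_value = 'nope'   # lowercase, ignored by from_object()

demo_app = Flask(__name__)
demo_app.config.from_object(DemoConfig())
assert demo_app.config['SECRET_KEY'] == 'demo'
assert 'helper_value' not in demo_app.config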
| # coding: utf-8
"""
Initializes flask server and assigns all routes by importing modules
"""
import flask
#import config
import util
from model.config import Config # NB The model module needs to be imported *after* setting CURRENT_VERSION_TIMESTAMP,
# since model.ndbModelBase uses it as default value for version_r property
app = flask.Flask(__name__) # pylint: disable=invalid-name
# note:Flask server doesn't need DEBUG parameter while developing, since server restarting is taken care by GAE SDK
#SECRET_KEY = CONFIG_DB.flask_secret.encode('ascii')
#model.AuthProvider.init()
class Config(object):
DEVELOPMENT = util.DEVT
SECRET_KEY = util.randomB64()
CONFIG_DB = Config.get_master_db()
config = Config()
app.config.from_object(config)
util.debugDict(config,'my config ')
util.debugDict(app.config,'flask app config ')
app.jinja_env.line_statement_prefix = '#'
app.jinja_env.line_comment_prefix = '##'
import auth # pylint: disable=unused-import
import control.error
import control.index
import control.user
import model # pylint: disable=unused-import
import task # pylint: disable=unused-import
from api import helpers
API = helpers.Api(app)
import api.v1 # pylint: disable=unused-import
import logging
logging.debug('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ main @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#logging.debug('####################################################### app id: %r ' , config.APPLICATION_ID)
# logging.debug('####################################################### cur ver id: %r' , config.CURRENT_VERSION_ID)
# logging.debug('####################################################### cur ver name: %r' , config.CURRENT_VERSION_NAME)
# logging.debug('####################################################### cur ver timestamp: %r',config.CURRENT_VERSION_TIMESTAMP)
#logging.debug('####################################################### cur ver datetime: %r', config.CURRENT_VERSION_DATE)
# shorts = [i for i[0] in config.authProviders]
# longs = [i for i[1] in config.authProviders]
# assert len(shorts) == len(set(shorts)), 'no short duplicates'
# assert len(longs ) == len(set(longs )), 'no long duplicates'
| en | 0.384695 | # coding: utf-8 Initializes flask server and assigns all routes by importing modules #import config # NB The model module needs to be imported *after* setting CURRENT_VERSION_TIMESTAMP, # since model.ndbModelBase uses it as default value for version_r property # pylint: disable=invalid-name # note:Flask server doesn't need DEBUG parameter while developing, since server restarting is taken care by GAE SDK #SECRET_KEY = CONFIG_DB.flask_secret.encode('ascii') #model.AuthProvider.init() #' # pylint: disable=unused-import # pylint: disable=unused-import # pylint: disable=unused-import # pylint: disable=unused-import #logging.debug('####################################################### app id: %r ' , config.APPLICATION_ID) # logging.debug('####################################################### cur ver id: %r' , config.CURRENT_VERSION_ID) # logging.debug('####################################################### cur ver name: %r' , config.CURRENT_VERSION_NAME) # logging.debug('####################################################### cur ver timestamp: %r',config.CURRENT_VERSION_TIMESTAMP) #logging.debug('####################################################### cur ver datetime: %r', config.CURRENT_VERSION_DATE) # shorts = [i for i[0] in config.authProviders] # longs = [i for i[1] in config.authProviders] # assert len(shorts) == len(set(shorts)), 'no short duplicates' # assert len(longs ) == len(set(longs )), 'no long duplicates' | 2.158545 | 2 |
app/core/migrations/0006_example.py | Amirh-zahmatkesh/acc-back | 0 | 6630391 |
# Generated by Django 3.0.7 on 2020-06-17 08:01
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_faq'),
]
operations = [
migrations.CreateModel(
name='Example',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', ckeditor.fields.RichTextField()),
('answer', ckeditor.fields.RichTextField()),
],
),
]
| # Generated by Django 3.0.7 on 2020-06-17 08:01
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_faq'),
]
operations = [
migrations.CreateModel(
name='Example',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', ckeditor.fields.RichTextField()),
('answer', ckeditor.fields.RichTextField()),
],
),
] | en | 0.829637 | # Generated by Django 3.0.7 on 2020-06-17 08:01 | 1.943643 | 2 |
pyboids/app/flock.py | mancaf/pyboids | 12 | 6630392 |
"""Flock class."""
import pygame
import numpy as np
from . import params, utils
from .boid import Boid, LeaderBoid
from .obstacle import Obstacle
class Flock(pygame.sprite.Sprite):
"""Represents a set of boids that obey to certain behaviours."""
def __init__(self):
super().__init__()
self.normal_boids = pygame.sprite.Group()
self.leader_boid = pygame.sprite.GroupSingle()
self.boids = pygame.sprite.Group()
self.obstacles = pygame.sprite.Group()
self.behaviours = {
'pursue': False,
'escape': False,
'wander': True,
'avoid collision': True,
'follow leader': False,
'align': False,
'separate': False,
}
self.kinds = ['normal-boid', 'leader-boid', 'obstacle']
self.add_kind = 'normal-boid'
def switch_element(self):
self.kinds = np.roll(self.kinds, -1)
self.add_kind = self.kinds[0]
def add_element(self, pos):
"""Add a boid at pos.
The type of boid is the current add_kind value.
"""
angle = np.pi * (2 * np.random.rand() - 1)
vel = params.BOID_MAX_SPEED * np.array([np.cos(angle), np.sin(angle)])
if self.add_kind == 'normal-boid':
self.normal_boids.add(Boid(pos=np.array(pos), vel=vel))
self.boids.add(self.normal_boids)
elif self.add_kind == 'leader-boid':
self.boids.remove(self.leader_boid)
self.leader_boid.add(LeaderBoid(pos=np.array(pos), vel=vel))
self.boids.add(self.leader_boid)
elif self.add_kind == 'obstacle':
self.obstacles.add(Obstacle(pos=pos))
def remain_in_screen(self):
for boid in self.boids:
if boid.pos[0] > params.SCREEN_WIDTH - params.BOX_MARGIN:
boid.steer(np.array([-params.STEER_INSIDE, 0.]))
if boid.pos[0] < params.BOX_MARGIN:
boid.steer(np.array([params.STEER_INSIDE, 0.]))
if boid.pos[1] < params.BOX_MARGIN:
boid.steer(np.array([0., params.STEER_INSIDE]))
if boid.pos[1] > params.SCREEN_HEIGHT - params.BOX_MARGIN:
boid.steer(np.array([0., -params.STEER_INSIDE]))
def seek_single(self, target_pos, boid):
d = utils.dist(boid.pos, target_pos)
steering = (
utils.normalize(target_pos - boid.pos) *
params.BOID_MAX_SPEED * min(d / params.R_SEEK, 1) -
boid.vel)
boid.steer(steering, alt_max=params.BOID_MAX_FORCE / 50)
def seek(self, target_boid):
"""Make all normal boids seek to go to a target."""
for boid in self.normal_boids:
self.seek_single(target_boid, boid)
def flee_single(self, target_pos, boid):
too_close = utils.dist2(boid.pos, target_pos) < params.R_FLEE**2
if too_close:
steering = (utils.normalize(boid.pos - target_pos) *
params.BOID_MAX_SPEED -
boid.vel)
boid.steer(steering, alt_max=params.BOID_MAX_FORCE / 10)
def flee(self, target_boid):
"""Make all normal boids fly away from a target."""
for boid in self.normal_boids:
self.flee_single(target_boid, boid)
def pursue_single(self, target_pos, target_vel, boid):
t = int(utils.norm(target_pos - boid.pos) / params.BOID_MAX_SPEED)
future_pos = target_pos + t * target_vel
self.seek_single(future_pos, boid)
def pursue(self, target_boid):
"""Make all normal boids pursue a target boid with anticipation."""
for boid in self.normal_boids:
self.pursue_single(target_boid.pos, target_boid.vel, boid)
def escape_single(self, target_pos, target_vel, boid):
t = int(utils.norm(target_pos - boid.pos) / params.BOID_MAX_SPEED)
future_pos = target_pos + t * target_vel
self.flee_single(future_pos, boid)
def escape(self, target_boid):
"""Make all normal boids escape a target boid with anticipation."""
for boid in self.normal_boids:
self.escape_single(target_boid.pos, target_boid.vel, boid)
def wander(self):
"""Make all boids wander around randomly."""
rands = 2 * np.random.rand(len(self.boids)) - 1
cos = np.cos([b.wandering_angle for b in self.boids])
sin = np.sin([b.wandering_angle for b in self.boids])
for i, boid in enumerate(self.boids):
nvel = utils.normalize(boid.vel)
# calculate circle center
circle_center = nvel * params.WANDER_DIST
# calculate displacement force
c, s = cos[i], sin[i]
displacement = np.dot(
np.array([[c, -s], [s, c]]), nvel * params.WANDER_RADIUS)
boid.steer(circle_center + displacement)
boid.wandering_angle += params.WANDER_ANGLE * rands[i]
def find_most_threatening_obstacle(self, boid, aheads):
most_threatening = None
distance_to_most_threatening = float('inf')
for obstacle in self.obstacles:
norms = [utils.norm2(obstacle.pos - ahead) for ahead in aheads]
if all(n > obstacle.radius * obstacle.radius for n in norms):
continue
distance_to_obstacle = utils.dist2(boid.pos, obstacle.pos)
if most_threatening is not None and \
distance_to_obstacle > distance_to_most_threatening:
continue
most_threatening = obstacle
distance_to_most_threatening = utils.dist2(boid.pos,
most_threatening.pos)
return most_threatening
def avoid_collision(self):
"""Avoid collisions between boids and obstacles."""
for boid in self.boids:
ahead = boid.pos + boid.vel / params.BOID_MAX_SPEED * \
params.MAX_SEE_AHEAD
ahead2 = boid.pos + boid.vel / params.BOID_MAX_SPEED / 2 * \
params.MAX_SEE_AHEAD
most_threatening = self.find_most_threatening_obstacle(
boid, [ahead, ahead2, boid.pos])
if most_threatening is not None:
steering = utils.normalize(ahead - most_threatening.pos)
steering *= params.MAX_AVOID_FORCE
boid.steer(steering)
def separate_single(self, boid):
number_of_neighbors = 0
force = np.zeros(2)
for other_boid in self.boids:
if boid == other_boid:
continue
elif pygame.sprite.collide_rect(boid, other_boid):
force -= other_boid.pos - boid.pos
number_of_neighbors += 1
if number_of_neighbors:
force /= number_of_neighbors
boid.steer(utils.normalize(force) * params.MAX_SEPARATION_FORCE)
def separate(self):
for boid in self.boids:
self.separate_single(boid)
def follow_leader(self, leader):
"""Make all normal boids follow a leader.
Boids stay at a certain distance from the leader.
They move away when in the leader's path.
They avoid cluttering when behind the leader.
"""
nvel = utils.normalize(leader.vel)
behind = leader.pos - nvel * params.LEADER_BEHIND_DIST
ahead = leader.pos + nvel * params.LEADER_AHEAD_DIST
for boid in self.normal_boids:
self.seek_single(behind, boid)
self.escape_single(ahead, leader.vel, boid)
def align(self):
"""Make all boids to align their velocities."""
r2 = params.ALIGN_RADIUS * params.ALIGN_RADIUS
# find the neighbors
boids = list(self.normal_boids)
neighbors = [[] for boid in boids]
for i, boid in enumerate(boids):
for j, other_boid in enumerate(boids):
if j in neighbors[i]:
continue
elif boid == other_boid:
continue
elif utils.dist2(boid.pos, other_boid.pos) < r2:
neighbors[i].append(j)
neighbors[j].append(i)
for i, boid in enumerate(boids):
number_of_neighbors = len(neighbors[i])
if number_of_neighbors:
desired = np.zeros(2)
for j in neighbors[i]:
desired += boids[j].vel
boid.steer(desired / number_of_neighbors - boid.vel)
def flock(self):
"""Simulate flocking behaviour : alignment + separation + cohesion."""
self.align()
for boid in self.boids:
self.separate_single(boid)
def update(self, motion_event, click_event):
# apply steering behaviours
if self.leader_boid:
target = self.leader_boid.sprite
self.behaviours['pursue'] and self.pursue(target)
self.behaviours['escape'] and self.escape(target)
self.behaviours['follow leader'] and self.follow_leader(target)
self.behaviours['wander'] and self.wander()
if self.behaviours['avoid collision'] and self.obstacles:
self.avoid_collision()
self.behaviours['align'] and self.align()
self.behaviours['separate'] and self.separate()
self.remain_in_screen()
# update all boids
for boid in self.boids:
boid.update()
def display(self, screen):
for obstacle in self.obstacles:
obstacle.display(screen)
for boid in self.boids:
boid.display(screen, debug=params.DEBUG)
for boid in self.boids:
boid.reset_frame()
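A minimal driver loop for the Flock class above might look like the sketch below. The import paths and the params constants are assumptions based on the file layout shown here (params is expected to expose SCREEN_WIDTH and SCREEN_HEIGHT, as used in remain_in_screen); this is not code from the repository.

# Minimal driver sketch (assumed package layout pyboids.app; illustrative only).
import pygame
from pyboids.app import params
from pyboids.app.flock import Flock

def run_demo():
    pygame.init()
    screen = pygame.display.set_mode((params.SCREEN_WIDTH, params.SCREEN_HEIGHT))
    clock = pygame.time.Clock()
    flock = Flock()
    for k in range(5):
        flock.add_element((120 + 60 * k, 240))   # a few normal boids to start
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                flock.add_element(event.pos)      # drop a boid/leader/obstacle at the click
        flock.update(None, None)                  # the shown update() ignores its event arguments
        screen.fill((255, 255, 255))
        flock.display(screen)
        pygame.display.flip()
        clock.tick(60)
    pygame.quit()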
| <filename>pyboids/app/flock.py
"""Flock class."""
import pygame
import numpy as np
from . import params, utils
from .boid import Boid, LeaderBoid
from .obstacle import Obstacle
class Flock(pygame.sprite.Sprite):
"""Represents a set of boids that obey to certain behaviours."""
def __init__(self):
super().__init__()
self.normal_boids = pygame.sprite.Group()
self.leader_boid = pygame.sprite.GroupSingle()
self.boids = pygame.sprite.Group()
self.obstacles = pygame.sprite.Group()
self.behaviours = {
'pursue': False,
'escape': False,
'wander': True,
'avoid collision': True,
'follow leader': False,
'align': False,
'separate': False,
}
self.kinds = ['normal-boid', 'leader-boid', 'obstacle']
self.add_kind = 'normal-boid'
def switch_element(self):
self.kinds = np.roll(self.kinds, -1)
self.add_kind = self.kinds[0]
def add_element(self, pos):
"""Add a boid at pos.
The type of boid is the current add_kind value.
"""
angle = np.pi * (2 * np.random.rand() - 1)
vel = params.BOID_MAX_SPEED * np.array([np.cos(angle), np.sin(angle)])
if self.add_kind == 'normal-boid':
self.normal_boids.add(Boid(pos=np.array(pos), vel=vel))
self.boids.add(self.normal_boids)
elif self.add_kind == 'leader-boid':
self.boids.remove(self.leader_boid)
self.leader_boid.add(LeaderBoid(pos=np.array(pos), vel=vel))
self.boids.add(self.leader_boid)
elif self.add_kind == 'obstacle':
self.obstacles.add(Obstacle(pos=pos))
def remain_in_screen(self):
for boid in self.boids:
if boid.pos[0] > params.SCREEN_WIDTH - params.BOX_MARGIN:
boid.steer(np.array([-params.STEER_INSIDE, 0.]))
if boid.pos[0] < params.BOX_MARGIN:
boid.steer(np.array([params.STEER_INSIDE, 0.]))
if boid.pos[1] < params.BOX_MARGIN:
boid.steer(np.array([0., params.STEER_INSIDE]))
if boid.pos[1] > params.SCREEN_HEIGHT - params.BOX_MARGIN:
boid.steer(np.array([0., -params.STEER_INSIDE]))
def seek_single(self, target_pos, boid):
d = utils.dist(boid.pos, target_pos)
steering = (
utils.normalize(target_pos - boid.pos) *
params.BOID_MAX_SPEED * min(d / params.R_SEEK, 1) -
boid.vel)
boid.steer(steering, alt_max=params.BOID_MAX_FORCE / 50)
def seek(self, target_boid):
"""Make all normal boids seek to go to a target."""
for boid in self.normal_boids:
self.seek_single(target_boid, boid)
def flee_single(self, target_pos, boid):
too_close = utils.dist2(boid.pos, target_pos) < params.R_FLEE**2
if too_close:
steering = (utils.normalize(boid.pos - target_pos) *
params.BOID_MAX_SPEED -
boid.vel)
boid.steer(steering, alt_max=params.BOID_MAX_FORCE / 10)
def flee(self, target_boid):
"""Make all normal boids fly away from a target."""
for boid in self.normal_boids:
self.flee_single(target_boid, boid)
def pursue_single(self, target_pos, target_vel, boid):
t = int(utils.norm(target_pos - boid.pos) / params.BOID_MAX_SPEED)
future_pos = target_pos + t * target_vel
self.seek_single(future_pos, boid)
def pursue(self, target_boid):
"""Make all normal boids pursue a target boid with anticipation."""
for boid in self.normal_boids:
self.pursue_single(target_boid.pos, target_boid.vel, boid)
def escape_single(self, target_pos, target_vel, boid):
t = int(utils.norm(target_pos - boid.pos) / params.BOID_MAX_SPEED)
future_pos = target_pos + t * target_vel
self.flee_single(future_pos, boid)
def escape(self, target_boid):
"""Make all normal boids escape a target boid with anticipation."""
for boid in self.normal_boids:
self.escape_single(target_boid.pos, target_boid.vel, boid)
def wander(self):
"""Make all boids wander around randomly."""
rands = 2 * np.random.rand(len(self.boids)) - 1
cos = np.cos([b.wandering_angle for b in self.boids])
sin = np.sin([b.wandering_angle for b in self.boids])
for i, boid in enumerate(self.boids):
nvel = utils.normalize(boid.vel)
# calculate circle center
circle_center = nvel * params.WANDER_DIST
# calculate displacement force
c, s = cos[i], sin[i]
displacement = np.dot(
np.array([[c, -s], [s, c]]), nvel * params.WANDER_RADIUS)
boid.steer(circle_center + displacement)
boid.wandering_angle += params.WANDER_ANGLE * rands[i]
def find_most_threatening_obstacle(self, boid, aheads):
most_threatening = None
distance_to_most_threatening = float('inf')
for obstacle in self.obstacles:
norms = [utils.norm2(obstacle.pos - ahead) for ahead in aheads]
if all(n > obstacle.radius * obstacle.radius for n in norms):
continue
distance_to_obstacle = utils.dist2(boid.pos, obstacle.pos)
if most_threatening is not None and \
distance_to_obstacle > distance_to_most_threatening:
continue
most_threatening = obstacle
distance_to_most_threatening = utils.dist2(boid.pos,
most_threatening.pos)
return most_threatening
def avoid_collision(self):
"""Avoid collisions between boids and obstacles."""
for boid in self.boids:
ahead = boid.pos + boid.vel / params.BOID_MAX_SPEED * \
params.MAX_SEE_AHEAD
ahead2 = boid.pos + boid.vel / params.BOID_MAX_SPEED / 2 * \
params.MAX_SEE_AHEAD
most_threatening = self.find_most_threatening_obstacle(
boid, [ahead, ahead2, boid.pos])
if most_threatening is not None:
steering = utils.normalize(ahead - most_threatening.pos)
steering *= params.MAX_AVOID_FORCE
boid.steer(steering)
def separate_single(self, boid):
number_of_neighbors = 0
force = np.zeros(2)
for other_boid in self.boids:
if boid == other_boid:
continue
elif pygame.sprite.collide_rect(boid, other_boid):
force -= other_boid.pos - boid.pos
number_of_neighbors += 1
if number_of_neighbors:
force /= number_of_neighbors
boid.steer(utils.normalize(force) * params.MAX_SEPARATION_FORCE)
def separate(self):
for boid in self.boids:
self.separate_single(boid)
def follow_leader(self, leader):
"""Make all normal boids follow a leader.
Boids stay at a certain distance from the leader.
They move away when in the leader's path.
They avoid cluttering when behind the leader.
"""
nvel = utils.normalize(leader.vel)
behind = leader.pos - nvel * params.LEADER_BEHIND_DIST
ahead = leader.pos + nvel * params.LEADER_AHEAD_DIST
for boid in self.normal_boids:
self.seek_single(behind, boid)
self.escape_single(ahead, leader.vel, boid)
def align(self):
"""Make all boids to align their velocities."""
r2 = params.ALIGN_RADIUS * params.ALIGN_RADIUS
# find the neighbors
boids = list(self.normal_boids)
neighbors = [[] for boid in boids]
for i, boid in enumerate(boids):
for j, other_boid in enumerate(boids):
if j in neighbors[i]:
continue
elif boid == other_boid:
continue
elif utils.dist2(boid.pos, other_boid.pos) < r2:
neighbors[i].append(j)
neighbors[j].append(i)
for i, boid in enumerate(boids):
number_of_neighbors = len(neighbors[i])
if number_of_neighbors:
desired = np.zeros(2)
for j in neighbors[i]:
desired += boids[j].vel
boid.steer(desired / number_of_neighbors - boid.vel)
def flock(self):
"""Simulate flocking behaviour : alignment + separation + cohesion."""
self.align()
for boid in self.boids:
self.separate_single(boid)
def update(self, motion_event, click_event):
# apply steering behaviours
if self.leader_boid:
target = self.leader_boid.sprite
self.behaviours['pursue'] and self.pursue(target)
self.behaviours['escape'] and self.escape(target)
self.behaviours['follow leader'] and self.follow_leader(target)
self.behaviours['wander'] and self.wander()
if self.behaviours['avoid collision'] and self.obstacles:
self.avoid_collision()
self.behaviours['align'] and self.align()
self.behaviours['separate'] and self.separate()
self.remain_in_screen()
# update all boids
for boid in self.boids:
boid.update()
def display(self, screen):
for obstacle in self.obstacles:
obstacle.display(screen)
for boid in self.boids:
boid.display(screen, debug=params.DEBUG)
for boid in self.boids:
boid.reset_frame()
| en | 0.837065 | Flock class. Represents a set of boids that obey to certain behaviours. Add a boid at pos. The type of boid is the current add_kind value. Make all normal boids seek to go to a target. Make all normal boids fly away from a target. Make all normal boids pursue a target boid with anticipation. Make all normal boids escape a target boid with anticipation. Make all boids wander around randomly. # calculate circle center # calculate displacement force Avoid collisions between boids and obstacles. Make all normal boids follow a leader. Boids stay at a certain distance from the leader. They move away when in the leader's path. They avoid cluttering when behind the leader. Make all boids to align their velocities. # find the neighbors Simulate flocking behaviour : alignment + separation + cohesion. # apply steering behaviours # update all boids | 3.296532 | 3 |
Python/easy/0021_merge_two_sorted_lists.py | CalmScout/LeetCode | 0 | 6630393 | """
Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
Example:
Input: 1->2->4, 1->3->4
Output: 1->1->2->3->4->4
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
result = [self.val]
pointer = self
while pointer.next:
pointer = pointer.next
result.append(pointer.val)
return str(result)
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
result = ListNode(42)
l3 = result
while l1 and l2:
if l1.val <= l2.val:
l3.next = ListNode(l1.val)
l1 = l1.next
else:
l3.next = ListNode(l2.val)
l2 = l2.next
l3 = l3.next
tail = l1 if l2 is None else l2
l3.next = tail
return result.next
if __name__ == "__main__":
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(4)
l2 = ListNode(1)
l2.next = ListNode(3)
l2.next.next = ListNode(4)
out = str([1, 1, 2, 3, 4, 4])
actual = Solution().mergeTwoLists(l1, l2).__repr__()
assert out == actual, (out, actual)
l1 = ListNode(-9)
l1.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(7)
out = str([-9, 3, 5, 7])
actual = Solution().mergeTwoLists(l1, l2).__repr__()
assert out == actual, (out, actual)
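For comparison, the same merge can also be written recursively. The variant below is only an illustration reusing the ListNode class above; it is not part of the original solution.

# Illustrative alternative, not part of the original file: recursive merge
# using the same ListNode definition.
def merge_recursive(l1: ListNode, l2: ListNode) -> ListNode:
    if l1 is None:
        return l2
    if l2 is None:
        return l1
    if l1.val <= l2.val:
        l1.next = merge_recursive(l1.next, l2)
        return l1
    l2.next = merge_recursive(l1, l2.next)
    return l2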
| """
Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
Example:
Input: 1->2->4, 1->3->4
Output: 1->1->2->3->4->4
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
result = [self.val]
pointer = self
while pointer.next:
pointer = pointer.next
result.append(pointer.val)
return str(result)
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
result = ListNode(42)
l3 = result
while l1 and l2:
if l1.val <= l2.val:
l3.next = ListNode(l1.val)
l1 = l1.next
else:
l3.next = ListNode(l2.val)
l2 = l2.next
l3 = l3.next
tail = l1 if l2 is None else l2
l3.next = tail
return result.next
if __name__ == "__main__":
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(4)
l2 = ListNode(1)
l2.next = ListNode(3)
l2.next.next = ListNode(4)
out = str([1, 1, 2, 3, 4, 4])
actual = Solution().mergeTwoLists(l1, l2).__repr__()
assert out == actual, (out, actual)
l1 = ListNode(-9)
l1.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(7)
out = str([-9, 3, 5, 7])
actual = Solution().mergeTwoLists(l1, l2).__repr__()
assert out == actual, (out, actual)
| en | 0.827593 | Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists. Example: Input: 1->2->4, 1->3->4 Output: 1->1->2->3->4->4 # Definition for singly-linked list. | 3.989412 | 4 |
city_game.py | AndyBourne/city_game | 0 | 6630394 |
#!/usr/bin/python3.6
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import random
# import statsmodels.api as sm
# from statsmodels.nonparametric.kde import KDEUnivariate
# from statsmodels.nonparametric import smoothers_lowess
from pandas import Series, DataFrame
# from patsy import dmatrices
# from sklearn import datasets, svm
from pypinyin import pinyin, lazy_pinyin, Style
data = pd.read_csv("/Users/andybourne/Documents/GitHub/city_game/data/region.csv")  # region table with Chinese (REGION_NAME) and romanized (REGION_NAME_EN) city names
test_data = data[['REGION_NAME', 'REGION_NAME_EN']]
#name = input(u"Input City Name:")
name = input(u"输入中国城市的名字:")
left_pinyin_name = lazy_pinyin(name)
left_A = left_pinyin_name[-1]  # pinyin syllable of the last character of the input name
left_A_upper = left_A.upper()
# print(left_A)
# print(left_A_upper[0])
# test_data.head(3)
test_data_frame = DataFrame(test_data)
test_data_frame_list = np.array(test_data_frame)
train_x_list=test_data_frame_list.tolist()
# print(train_x_list)
new_answer = []
for each_list in train_x_list:
if isinstance(each_list,list):
for new_each in each_list:
if str(new_each)[0] == left_A_upper[0]:
# print(new_each)
new_answer.append(new_each)
# print(new_answer)
# print(len(new_answer))
new_answer_hanzi = []
for each_list in train_x_list:
if isinstance(each_list,list):
for new_each in each_list:
# print(new_each)
new_answer_hanzi.append(new_each)
# print(new_answer_hanzi)
# print(len(new_answer_hanzi))
# print(new_answer_hanzi.index(new_answer[random.randint(0,len(new_answer))]))
# pick a random matching romanized name; the Chinese name sits right before it in the flattened list
print(new_answer_hanzi[new_answer_hanzi.index(new_answer[random.randint(0, len(new_answer) - 1)])-1]) | #!/usr/bin/python3。6
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import random
# import statsmodels.api as sm
# from statsmodels.nonparametric.kde import KDEUnivariate
# from statsmodels.nonparametric import smoothers_lowess
from pandas import Series, DataFrame
# from patsy import dmatrices
# from sklearn import datasets, svm
from pypinyin import pinyin, lazy_pinyin, Style
data = pd.read_csv("/Users/andybourne/Documents/GitHub/city_game/data/region.csv")
test_data = data[['REGION_NAME', 'REGION_NAME_EN']]
#name = input(u"Input City Name:")
name = input(u"输入中国城市的名字:")
left_pinyin_name = lazy_pinyin(name)
left_A = left_pinyin_name[-1]
left_A_upper = left_A.upper()
# print(left_A)
# print(left_A_upper[0])
# test_data.head(3)
test_data_frame = DataFrame(test_data)
test_data_frame_list = np.array(test_data_frame)
train_x_list=test_data_frame_list.tolist()
# print(train_x_list)
new_answer = []
for each_list in train_x_list:
if isinstance(each_list,list):
for new_each in each_list:
if str(new_each)[0] == left_A_upper[0]:
# print(new_each)
new_answer.append(new_each)
# print(new_answer)
# print(len(new_answer))
new_answer_hanzi = []
for each_list in train_x_list:
if isinstance(each_list,list):
for new_each in each_list:
# print(new_each)
new_answer_hanzi.append(new_each)
# print(new_answer_hanzi)
# print(len(new_answer_hanzi))
# print(new_answer_hanzi.index(new_answer[random.randint(0,len(new_answer))]))
print(new_answer_hanzi[new_answer_hanzi.index(new_answer[random.randint(0,len(new_answer))])-1]) | en | 0.492131 | #!/usr/bin/python3。6 # -*- coding:utf-8 -*- # import statsmodels.api as sm # from statsmodels.nonparametric.kde import KDEUnivariate # from statsmodels.nonparametric import smoothers_lowess # from patsy import dmatrices # from sklearn import datasets, svm #name = input(u"Input City Name:") # print(left_A) # print(left_A_upper[0]) # test_data.head(3) # print(train_x_list) # print(new_each) # print(new_answer) # print(len(new_answer)) # print(new_each) # print(new_answer_hanzi) # print(len(new_answer_hanzi)) # print(new_answer_hanzi.index(new_answer[random.randint(0,len(new_answer))])) | 2.666683 | 3 |
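The matching rule in city_game.py above boils down to: take the pinyin of the last character of the input city and pick a random city whose romanized name starts with the same letter. A compact, hypothetical helper version of that rule (not part of the original script):

# Hypothetical helper, not part of the original script: same matching rule,
# expressed as a function over (chinese_name, romanized_name) pairs.
import random
from pypinyin import lazy_pinyin

def next_city(name, pairs):
    initial = lazy_pinyin(name)[-1][0].upper()                  # initial of the last character
    candidates = [cn for cn, en in pairs if str(en)[:1].upper() == initial]
    return random.choice(candidates) if candidates else None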
Backend/config/config.py | sebastcotd/FisiBici | 0 | 6630395 | '''
config.py: module where the application is configured
'''
from flask import Flask
from flask_cors import CORS
from mongoengine import connect
def config_app(app):
app.secret_key = 'clavesecreta'
DB_URI = "mongodb+srv://Mauricio:1234@fisibici"
DB_URI += ".cpmx7.mongodb.net/SistemaBicicletas?retryWrites=true&w=majority"
connect(host=DB_URI)
CORS(app=app, supports_credentials=True)
def config_app_production(app):
config_app(app)
app.config.update(
SERVER_NAME='fisi-bici.herokuapp.com',
SESSION_COOKIE_NAME='fisi-bici.herokuapp.com',
SESSION_COOKIE_DOMAIN='fisi-bici.herokuapp.com',
)
def config_app_development(app):
config_app(app)
app.config.update(
SERVER_NAME='127.0.0.1:5000',
SESSION_COOKIE_NAME='127.0.0.1:5000',
SESSION_COOKIE_DOMAIN='127.0.0.1:5000',
)
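A typical way to use these helpers would be an app factory that selects the production or development variant from the environment. The sketch below is an assumption about how the package might be wired, not code from the repository; in practice the hard-coded MongoDB credentials above would also move to an environment variable.

# Usage sketch (assumed import path and env handling, not from the repo).
import os
from flask import Flask
from config.config import config_app_development, config_app_production

def create_app():
    app = Flask(__name__)
    if os.environ.get('FLASK_ENV') == 'production':
        config_app_production(app)
    else:
        config_app_development(app)
    return app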
| '''
config.py: module where the application is configured
'''
from flask import Flask
from flask_cors import CORS
from mongoengine import connect
def config_app(app):
app.secret_key = 'clavesecreta'
DB_URI = "mongodb+srv://Mauricio:1234@fisibici"
DB_URI += ".cpmx7.mongodb.net/SistemaBicicletas?retryWrites=true&w=majority"
connect(host=DB_URI)
CORS(app=app, supports_credentials=True)
def config_app_production(app):
config_app(app)
app.config.update(
SERVER_NAME='fisi-bici.herokuapp.com',
SESSION_COOKIE_NAME='fisi-bici.herokuapp.com',
SESSION_COOKIE_DOMAIN='fisi-bici.herokuapp.com',
)
def config_app_development(app):
config_app(app)
app.config.update(
SERVER_NAME='127.0.0.1:5000',
SESSION_COOKIE_NAME='127.0.0.1:5000',
SESSION_COOKIE_DOMAIN='127.0.0.1:5000',
)
| es | 0.888076 | config.py: modulo donde se configura la aplicación | 2.001259 | 2 |
src/GaIA/pkgs/nmap/nmap-6.40/zenmap/radialnet/util/integration.py | uninth/UNItools | 0 | 6630396 |
# vim: set fileencoding=utf-8 :
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact <EMAIL>). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact <EMAIL> with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email <EMAIL> for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the <EMAIL> mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
from radialnet.core.Graph import *
from radialnet.gui.RadialNet import NetNode
import zenmapCore.NmapParser
import math
import re
COLORS = [(0.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 0.0, 0.0)]
BASE_RADIUS = 5.5
NONE_RADIUS = 4.5
def set_node_info(node, host):
    """Attach the host to the node and refresh its draw info: the color is
    taken from the host's vulnerability score and the radius grows with the
    number of open ports.
    """
node.set_host(host)
radius = BASE_RADIUS + 2 * math.log(node.get_info("number_of_open_ports") + 1)
node.set_draw_info({"color":COLORS[node.get_info("vulnerability_score")],
"radius":radius})
class TracerouteHostInfo(object):
"""This is a minimal implementation of HostInfo, sufficient to
represent the information in an intermediate traceroute hop."""
def __init__(self):
self.ip = None
self.ipv6 = None
self.mac = None
self.hostname = None
self.ports = []
self.extraports = []
self.osmatches = []
def get_hostname(self):
return self.hostname
def get_best_osmatch(self):
if not self.osmatches:
return None
def osmatch_key(osmatch):
try:
return -float(osmatch["accuracy"])
except ValueError:
return 0
return sorted(self.osmatches, key = osmatch_key)[0]
hostnames = property(lambda self: self.hostname and [self.hostname] or [])
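def _example_intermediate_hop():
    # Illustrative sketch, not part of the original module: shows how an
    # intermediate traceroute hop can be described with TracerouteHostInfo.
    # The address, hostname and OS-match values below are made up.
    hop_info = TracerouteHostInfo()
    hop_info.ip = {"addr": "10.0.0.1", "type": "ipv4"}
    hop_info.hostname = "gateway.example"
    hop_info.osmatches.append({"accuracy": "97", "name": "Linux 3.X"})
    # get_best_osmatch() returns the entry with the highest accuracy.
    return hop_info.get_best_osmatch()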
def make_graph_from_hosts(hosts):
#hosts = parser.get_root().search_children('host', deep=True)
graph = Graph()
nodes = list()
node_cache = {}
# Setting initial reference host
main_node = NetNode()
nodes.append(main_node)
localhost = TracerouteHostInfo()
localhost.ip = {"addr": "127.0.0.1/8", "type": "ipv4"}
localhost.hostname = "localhost"
main_node.set_host(localhost)
main_node.set_draw_info({"valid": True, "color":(0,0,0), "radius":NONE_RADIUS})
#Save endpoints for attaching scanned hosts to
endpoints = {}
# For each host in hosts just mount the graph
for host in hosts:
trace = host.trace
endpoints[host] = nodes[0]
hops = trace.get("hops")
# If host has traceroute information mount graph
if hops is not None and len(hops) > 0:
prev_node = nodes[0]
hops = trace.get("hops", [])
ttls = [int(hop["ttl"]) for hop in hops]
# Getting nodes of host by ttl
for ttl in range(1, max(ttls) + 1):
if ttl in ttls:
# Find a hop by ttl
hop = None
for h in hops:
if ttl == int(h["ttl"]):
hop = h
break
node = node_cache.get(hop["ipaddr"])
if node is None:
node = NetNode()
nodes.append(node)
hop_host = TracerouteHostInfo()
hop_host.ip = {"addr": hop["ipaddr"], "type": "", "vendor": ""}
node.set_draw_info({"valid":True})
node.set_draw_info({"color":(1,1,1),
"radius":NONE_RADIUS})
if hop["host"] != "":
hop_host.hostname = hop["host"]
node.set_host(hop_host)
node_cache[node.get_info("ip")] = node
rtt = hop["rtt"]
if rtt != "--":
graph.set_connection(node, prev_node, float(rtt))
else:
graph.set_connection(node, prev_node)
else:
node = NetNode()
nodes.append(node)
node.set_draw_info({"valid":False})
node.set_draw_info({"color":(1,1,1), "radius":NONE_RADIUS})
graph.set_connection(node, prev_node)
prev_node = node
endpoints[host] = node
# For each fully scanned host
for host in hosts:
ip = host.ip
if ip is None:
ip = host.ipv6
node = node_cache.get(ip["addr"])
if node is None:
node = NetNode()
nodes.append(node)
node.set_draw_info({"no_route":True})
graph.set_connection(node, endpoints[host])
node.set_draw_info({"valid":True})
node.set_draw_info({"scanned":True})
set_node_info(node, host)
node_cache[node.get_info("ip")] = node
graph.set_nodes(nodes)
graph.set_main_node(main_node)
return graph
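def _example_build_graph(parsed_hosts):
    # Illustrative sketch, not part of the original module: builds the radial
    # graph for a list of parsed host objects. The hosts are assumed to expose
    # .trace, .ip and .ipv6 the way the zenmap parser output does.
    return make_graph_from_hosts(parsed_hosts)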
| # vim: set fileencoding=utf-8 :
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact <EMAIL>). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact <EMAIL> with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email <EMAIL> for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the <EMAIL> mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
from radialnet.core.Graph import *
from radialnet.gui.RadialNet import NetNode
import zenmapCore.NmapParser
import math
import re
COLORS = [(0.0, 1.0, 0.0),
(1.0, 1.0, 0.0),
(1.0, 0.0, 0.0)]
BASE_RADIUS = 5.5
NONE_RADIUS = 4.5
def set_node_info(node, host):
    """Attach the host to the node and refresh its draw info: the color is
    taken from the host's vulnerability score and the radius grows with the
    number of open ports.
    """
node.set_host(host)
radius = BASE_RADIUS + 2 * math.log(node.get_info("number_of_open_ports") + 1)
node.set_draw_info({"color":COLORS[node.get_info("vulnerability_score")],
"radius":radius})
class TracerouteHostInfo(object):
"""This is a minimal implementation of HostInfo, sufficient to
represent the information in an intermediate traceroute hop."""
def __init__(self):
self.ip = None
self.ipv6 = None
self.mac = None
self.hostname = None
self.ports = []
self.extraports = []
self.osmatches = []
def get_hostname(self):
return self.hostname
def get_best_osmatch(self):
if not self.osmatches:
return None
def osmatch_key(osmatch):
try:
return -float(osmatch["accuracy"])
except ValueError:
return 0
return sorted(self.osmatches, key = osmatch_key)[0]
hostnames = property(lambda self: self.hostname and [self.hostname] or [])
def make_graph_from_hosts(hosts):
#hosts = parser.get_root().search_children('host', deep=True)
graph = Graph()
nodes = list()
node_cache = {}
# Setting initial reference host
main_node = NetNode()
nodes.append(main_node)
localhost = TracerouteHostInfo()
localhost.ip = {"addr": "127.0.0.1/8", "type": "ipv4"}
localhost.hostname = "localhost"
main_node.set_host(localhost)
main_node.set_draw_info({"valid": True, "color":(0,0,0), "radius":NONE_RADIUS})
#Save endpoints for attaching scanned hosts to
endpoints = {}
# For each host in hosts just mount the graph
for host in hosts:
trace = host.trace
endpoints[host] = nodes[0]
hops = trace.get("hops")
# If host has traceroute information mount graph
if hops is not None and len(hops) > 0:
prev_node = nodes[0]
hops = trace.get("hops", [])
ttls = [int(hop["ttl"]) for hop in hops]
# Getting nodes of host by ttl
for ttl in range(1, max(ttls) + 1):
if ttl in ttls:
# Find a hop by ttl
hop = None
for h in hops:
if ttl == int(h["ttl"]):
hop = h
break
node = node_cache.get(hop["ipaddr"])
if node is None:
node = NetNode()
nodes.append(node)
hop_host = TracerouteHostInfo()
hop_host.ip = {"addr": hop["ipaddr"], "type": "", "vendor": ""}
node.set_draw_info({"valid":True})
node.set_draw_info({"color":(1,1,1),
"radius":NONE_RADIUS})
if hop["host"] != "":
hop_host.hostname = hop["host"]
node.set_host(hop_host)
node_cache[node.get_info("ip")] = node
rtt = hop["rtt"]
if rtt != "--":
graph.set_connection(node, prev_node, float(rtt))
else:
graph.set_connection(node, prev_node)
else:
node = NetNode()
nodes.append(node)
node.set_draw_info({"valid":False})
node.set_draw_info({"color":(1,1,1), "radius":NONE_RADIUS})
graph.set_connection(node, prev_node)
prev_node = node
endpoints[host] = node
# For each fully scanned host
for host in hosts:
ip = host.ip
if ip is None:
ip = host.ipv6
node = node_cache.get(ip["addr"])
if node is None:
node = NetNode()
nodes.append(node)
node.set_draw_info({"no_route":True})
graph.set_connection(node, endpoints[host])
node.set_draw_info({"valid":True})
node.set_draw_info({"scanned":True})
set_node_info(node, host)
node_cache[node.get_info("ip")] = node
graph.set_nodes(nodes)
graph.set_main_node(main_node)
return graph | en | 0.892897 | # vim: set fileencoding=utf-8 : # ***********************IMPORTANT NMAP LICENSE TERMS************************ # * * # * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is * # * also a registered trademark of Insecure.Com LLC. This program is free * # * software; you may redistribute and/or modify it under the terms of the * # * GNU General Public License as published by the Free Software * # * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS * # * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, * # * modify, and redistribute this software under certain conditions. If * # * you wish to embed Nmap technology into proprietary software, we sell * # * alternative licenses (contact <EMAIL>). Dozens of software * # * vendors already license Nmap technology such as host discovery, port * # * scanning, OS detection, version detection, and the Nmap Scripting * # * Engine. * # * * # * Note that the GPL places important restrictions on "derivative works", * # * yet it does not provide a detailed definition of that term. To avoid * # * misunderstandings, we interpret that term as broadly as copyright law * # * allows. For example, we consider an application to constitute a * # * derivative work for the purpose of this license if it does any of the * # * following with any software or content covered by this license * # * ("Covered Software"): * # * * # * o Integrates source code from Covered Software. * # * * # * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db * # * or nmap-service-probes. * # * * # * o Is designed specifically to execute Covered Software and parse the * # * results (as opposed to typical shell or execution-menu apps, which will * # * execute anything you tell them to). * # * * # * o Includes Covered Software in a proprietary executable installer. The * # * installers produced by InstallShield are an example of this. Including * # * Nmap with other software in compressed or archival form does not * # * trigger this provision, provided appropriate open source decompression * # * or de-archiving software is widely available for no charge. For the * # * purposes of this license, an installer is considered to include Covered * # * Software even if it actually retrieves a copy of Covered Software from * # * another source during runtime (such as by downloading it from the * # * Internet). * # * * # * o Links (statically or dynamically) to a library which does any of the * # * above. * # * * # * o Executes a helper program, module, or script to do any of the above. * # * * # * This list is not exclusive, but is meant to clarify our interpretation * # * of derived works with some common examples. Other people may interpret * # * the plain GPL differently, so we consider this a special exception to * # * the GPL that we apply to Covered Software. Works which meet any of * # * these conditions must conform to all of the terms of this license, * # * particularly including the GPL Section 3 requirements of providing * # * source code and allowing free redistribution of the work as a whole. * # * * # * As another special exception to the GPL terms, Insecure.Com LLC grants * # * permission to link the code of this program with any version of the * # * OpenSSL library which is distributed under a license identical to that * # * listed in the included docs/licenses/OpenSSL.txt file, and distribute * # * linked combinations including the two. 
* # * * # * Any redistribution of Covered Software, including any derived works, * # * must obey and carry forward all of the terms of this license, including * # * obeying all GPL rules and restrictions. For example, source code of * # * the whole work must be provided and free redistribution must be * # * allowed. All GPL references to "this License", are to be treated as * # * including the special and conditions of the license text as well. * # * * # * Because this license imposes special exceptions to the GPL, Covered * # * Work may not be combined (even as part of a larger work) with plain GPL * # * software. The terms, conditions, and exceptions of this license must * # * be included as well. This license is incompatible with some other open * # * source licenses as well. In some cases we can relicense portions of * # * Nmap or grant special permissions to use it in other open source * # * software. Please contact <EMAIL> with any such requests. * # * Similarly, we don't incorporate incompatible open source software into * # * Covered Software without special permission from the copyright holders. * # * * # * If you have any questions about the licensing restrictions on using * # * Nmap in other works, are happy to help. As mentioned above, we also * # * offer alternative license to integrate Nmap into proprietary * # * applications and appliances. These contracts have been sold to dozens * # * of software vendors, and generally include a perpetual license as well * # * as providing for priority support and updates. They also fund the * # * continued development of Nmap. Please email <EMAIL> for * # * further information. * # * * # * If you received these files with a written license agreement or * # * contract stating terms other than the terms above, then that * # * alternative license agreement takes precedence over these comments. * # * * # * Source is provided to this software because we believe users have a * # * right to know exactly what a program is going to do before they run it. * # * This also allows you to audit the software for security holes (none * # * have been found so far). * # * * # * Source code also allows you to port Nmap to new platforms, fix bugs, * # * and add new features. You are highly encouraged to send your changes * # * to the <EMAIL> mailing list for possible incorporation into the * # * main distribution. By sending these changes to Fyodor or one of the * # * Insecure.Org development mailing lists, or checking them into the Nmap * # * source code repository, it is understood (unless you specify otherwise) * # * that you are offering the Nmap Project (Insecure.Com LLC) the * # * unlimited, non-exclusive right to reuse, modify, and relicense the * # * code. Nmap will always be available Open Source, but this is important * # * because the inability to relicense code has caused devastating problems * # * for other Free Software projects (such as KDE and NASM). We also * # * occasionally relicense the code to third parties as discussed above. * # * If you wish to specify special license conditions of your * # * contributions, just say so when you send them. * # * * # * This program is distributed in the hope that it will be useful, but * # * WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the Nmap * # * license file for more details (it's in a COPYING file included with * # * Nmap, and also available from https://svn.nmap.org/nmap/COPYING * # * * # ***************************************************************************/ This is a minimal implementation of HostInfo, sufficient to represent the information in an intermediate traceroute hop. #hosts = parser.get_root().search_children('host', deep=True) # Setting initial reference host #Save endpoints for attaching scanned hosts to # For each host in hosts just mount the graph # If host has traceroute information mount graph # Getting nodes of host by ttl # Find a hop by ttl # For each fully scanned host | 1.199422 | 1 |
AbstractFactory/Chair/MediumChair.py | ahaile505/Python_Design_Patterns | 0 | 6630397 | from .IChair import IChair
class MediumChair(IChair):
"""The Medium Chair Concrete Class which implements the IChair interface"""
def __init__(self):
self._height = 60
self._width = 60
self._depth = 60
def dimensions(self):
return {"width": self._width, "depth": self._depth, "height": self._height}
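def _example_dimensions():
    # Illustrative sketch, not part of the original file: the concrete chair is
    # used through the IChair interface, so callers only rely on dimensions().
    chair = MediumChair()
    return chair.dimensions()  # {'width': 60, 'depth': 60, 'height': 60}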
| from .IChair import IChair
class MediumChair(IChair):
"""The Medium Chair Concrete Class which implements the IChair interface"""
def __init__(self):
self._height = 60
self._width = 60
self._depth = 60
def dimensions(self):
return {"width": self._width, "depth": self._depth, "height": self._height}
| en | 0.861634 | The Medium Chair Concrete Class which implements the IChair interface | 2.77532 | 3 |
opportunities/migrations/0010_opportunity_category.py | MrEscape54/CRM | 0 | 6630398 | # Generated by Django 3.1.4 on 2020-12-31 21:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('opportunities', '0009_auto_20201231_1720'),
]
operations = [
migrations.AddField(
model_name='opportunity',
name='category',
            field=models.CharField(choices=[('Support', 'Support'), ('Implementation', 'Implementation'), ('Assessment', 'Assessment'), ('Outsourcing', 'Outsourcing')], default='Support', max_length=50, verbose_name='Category'),
preserve_default=False,
),
]
| # Generated by Django 3.1.4 on 2020-12-31 21:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('opportunities', '0009_auto_20201231_1720'),
]
operations = [
migrations.AddField(
model_name='opportunity',
name='category',
            field=models.CharField(choices=[('Support', 'Support'), ('Implementation', 'Implementation'), ('Assessment', 'Assessment'), ('Outsourcing', 'Outsourcing')], default='Support', max_length=50, verbose_name='Category'),
preserve_default=False,
),
]
| en | 0.809188 | # Generated by Django 3.1.4 on 2020-12-31 21:56 | 1.629438 | 2 |
proyectoprincipal/mainapp/queue.py | diegofdoruiz/proyectowww | 0 | 6630399 | from .models import Profile, Service, Location, Specialty, Turn, LocationOnService
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
import json
from django.db import transaction
class Queue():
    # Users in the 'Cajero' (cashier) group; not actually needed
users = User.objects.filter(groups__name='Cajero').order_by('pk')
    # Service windows that are currently free
windows_on_service = LocationOnService.objects.all().filter(status=1)
    # Turns waiting to be attended, ordered by arrival time
turns = Turn.objects.all().filter(status=1).order_by()
def get_user_services(self, user):
specialty = user.profile.specialty
if specialty:
services = Service.objects.all().filter(specialty=specialty)
return services
else:
return {}
    # Returns the id of the next turn that the given user should attend
def get_fisrt_turn_pk(self, user):
services = self.get_user_services(user)
first = None
        # Turns belonging to the user's own services, in order of arrival
turns_of_service = Turn.objects.all().filter(service__in=services, status='1').order_by('created_at')
if turns_of_service:
return turns_of_service.first().pk
        # Other turns, in order of arrival
other_turns = Turn.objects.all().filter(status='1').exclude(service__in=services).order_by('created_at')
if other_turns:
return other_turns.first().pk
return first
    # Critical section, which should be as prepared for concurrency as possible.
    # Returns a turn: if the response is a turn, it was still available and its
    # status was changed to '2' ('calling'); if the response is None, the caller
    # must keep looking for an available turn.
@transaction.atomic
def get_next(self, user):
pk = self.get_fisrt_turn_pk(user)
if pk:
with transaction.atomic():
turn = (
Turn.objects
.select_for_update()
.get(pk=pk)
)
if turn.status == '1':
turn.status = '2';
turn.save()
return turn
else:
return None
else:
return None
    # Build the queue for a specific user
def build_queue_for_user(self, user):
services = self.get_user_services(user)
turns = {}
cont = 1
        # Turns belonging to the user's own services, in order of arrival
turns_of_service = Turn.objects.all().filter(service__in=services, status=1).order_by('created_at')
for turn_of_service in turns_of_service:
#turns.append(turn_of_service.code)
turns[cont] = turn_of_service.code
cont = cont + 1
        # Other turns, in order of arrival
other_turns = Turn.objects.all().filter(status=1).exclude(service__in=services).order_by('created_at')
for other_turn in other_turns:
#turns.append(other_turn.code)
turns[cont] = other_turn.code
cont = cont + 1
return turns
def get_all_queue(self):
window_detail = {}
windows_detail = {}
windows_on_service = LocationOnService.objects.all().filter(status=1)
for window in windows_on_service:
window_detail['turns'] = self.build_queue_for_user(window.user)
windows_detail[window.user.pk] = window_detail
window = None
window_detail = {}
return windows_detail
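def _example_call_next_turn(cashier_user):
    # Hypothetical usage sketch, not part of the original module: a cashier
    # asks for the next turn to call. A None result means another window took
    # the turn first, so the caller should simply ask again.
    queue = Queue()
    turn = queue.get_next(cashier_user)
    return turn.code if turn is not None else None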
| from .models import Profile, Service, Location, Specialty, Turn, LocationOnService
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
import json
from django.db import transaction
class Queue():
    # Users in the 'Cajero' (cashier) group; not actually needed
users = User.objects.filter(groups__name='Cajero').order_by('pk')
    # Service windows that are currently free
windows_on_service = LocationOnService.objects.all().filter(status=1)
    # Turns waiting to be attended, ordered by arrival time
turns = Turn.objects.all().filter(status=1).order_by()
def get_user_services(self, user):
specialty = user.profile.specialty
if specialty:
services = Service.objects.all().filter(specialty=specialty)
return services
else:
return {}
    # Returns the id of the next turn that the given user should attend
def get_fisrt_turn_pk(self, user):
services = self.get_user_services(user)
first = None
        # Turns belonging to the user's own services, in order of arrival
turns_of_service = Turn.objects.all().filter(service__in=services, status='1').order_by('created_at')
if turns_of_service:
return turns_of_service.first().pk
        # Other turns, in order of arrival
other_turns = Turn.objects.all().filter(status='1').exclude(service__in=services).order_by('created_at')
if other_turns:
return other_turns.first().pk
return first
    # Critical section, which should be as prepared for concurrency as possible.
    # Returns a turn: if the response is a turn, it was still available and its
    # status was changed to '2' ('calling'); if the response is None, the caller
    # must keep looking for an available turn.
@transaction.atomic
def get_next(self, user):
pk = self.get_fisrt_turn_pk(user)
if pk:
with transaction.atomic():
turn = (
Turn.objects
.select_for_update()
.get(pk=pk)
)
if turn.status == '1':
turn.status = '2';
turn.save()
return turn
else:
return None
else:
return None
    # Build the queue for a specific user
def build_queue_for_user(self, user):
services = self.get_user_services(user)
turns = {}
cont = 1
        # Turns belonging to the user's own services, in order of arrival
turns_of_service = Turn.objects.all().filter(service__in=services, status=1).order_by('created_at')
for turn_of_service in turns_of_service:
#turns.append(turn_of_service.code)
turns[cont] = turn_of_service.code
cont = cont + 1
        # Other turns, in order of arrival
other_turns = Turn.objects.all().filter(status=1).exclude(service__in=services).order_by('created_at')
for other_turn in other_turns:
#turns.append(other_turn.code)
turns[cont] = other_turn.code
cont = cont + 1
return turns
def get_all_queue(self):
window_detail = {}
windows_detail = {}
windows_on_service = LocationOnService.objects.all().filter(status=1)
for window in windows_on_service:
window_detail['turns'] = self.build_queue_for_user(window.user)
windows_detail[window.user.pk] = window_detail
window = None
window_detail = {}
return windows_detail
| es | 0.961179 | # Usuarios de tipo cajeros, no se necesitan # Ventanillas que están libres #Turnos esperando la atención, ordenados por hora de legada # retorna el id del siguiente turno que debe atender un usuario # Turnos correspondientes a los servicios propios en orden de llegada # Otros turnos en orden de llegada # Proceso crítico, el cual debe estar en lo posible preparado para la concurrencia. # Retorna un turno, si la respuesta es un turno # es porque el turno estaba disponible y le cambió el estado a '2' : 'calling', # si la respuesta es None es porque debe seguir buscando un turno disponible # Construir la cola para un usuario específico # Turnos correspondientes a los servicios propios en orden de llegada #turns.append(turn_of_service.code) # Otros turnos en orden de llegada #turns.append(other_turn.code) | 2.117294 | 2 |
chapter_9/sset/upload_data.py | LifeOfGame/mongodb_redis | 183 | 6630400 | import pymongo
import redis
handler = pymongo.MongoClient('mongodb://root:iamsuperuser@localhost').chapter_9.rank_data
client = redis.Redis()
rows = handler.find({}, {'_id': 0})
for row in rows:
client.zadd('rank', row['user_id'], row['score']) | import pymongo
import redis
handler = pymongo.MongoClient('mongodb://root:iamsuperuser@localhost').chapter_9.rank_data
client = redis.Redis()
rows = handler.find({}, {'_id': 0})
for row in rows:
client.zadd('rank', row['user_id'], row['score']) | none | 1 | 2.398525 | 2 |
|
Training Modules/03 Regex/3.07 Matrix Script/solution.py | azhari33/Python3-by-practice | 54 | 6630401 | #!/bin/python3
import math
import os
import random
import re
import sys
p = re.compile(r'(?<=\w)([\$\#\%\s]+)(?=\w)')
# Read the matrix dimensions: n rows and m columns
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
# Read the n rows of the matrix
matrix = []
for _ in range(n):
    matrix_item = input()
    matrix.append(matrix_item)
# Decode the script by reading the matrix column by column
text = ""
for i in range(m):
    for j in range(n):
        text = text + matrix[j][i]
# Collapse any run of symbols and whitespace between two alphanumerics into a single space
print(p.sub(' ', text))
| #!/bin/python3
import math
import os
import random
import re
import sys
p = re.compile(r'(?<=\w)([\$\#\%\s]+)(?=\w)')
dem = sys.stdin.readline().split();
r = int(dem[0])
c = int(dem[1])
rows = [l for l in sys.stdin]
text = "";
for i in range(c):
for j in range(r):
text = text+rows[j][i]
print(p.sub(' ',text))
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
matrix = []
for _ in range(n):
matrix_item = input()
matrix.append(matrix_item)
| ru | 0.224462 | #!/bin/python3 #\%\s]+)(?=\w)') | 2.905032 | 3 |
03-Spark DFs/08-Spark DF withColumn.py | PacktPublishing/PySpark-and-AWS-Master-Big-Data-with-PySpark-and-AWS | 3 | 6630402 | <gh_stars>1-10
# Databricks notebook source
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit
spark = SparkSession.builder.appName("Spark DataFrames").getOrCreate()
# COMMAND ----------
df = spark.read.options(header='True', inferSchema='True').csv('/FileStore/tables/StudentData.csv')
df.show()
# COMMAND ----------
df = df.withColumn("roll", col("roll").cast("String"))
# COMMAND ----------
df.printSchema()
# COMMAND ----------
df = df.withColumn("marks", col('marks') + 10)
df.show()
# COMMAND ----------
df = df.withColumn("aggregated marks", col('marks') - 10)
df.show()
# COMMAND ----------
df = df.withColumn("name", lit("USA"))
df.show()
# COMMAND ----------
df.show()
# COMMAND ----------
df = df.withColumn("marks", col("marks") - 10).withColumn("updated marks", col("marks") + 20).withColumn("Country", lit("USA"))
# COMMAND ----------
df.show()
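# COMMAND ----------
# Illustrative extra cell, not part of the original notebook: withColumn can
# also derive a new column from a condition using when/otherwise.
from pyspark.sql.functions import when
df = df.withColumn("passed", when(col("marks") > 50, lit("yes")).otherwise(lit("no")))
df.show()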
| # Databricks notebook source
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit
spark = SparkSession.builder.appName("Spark DataFrames").getOrCreate()
# COMMAND ----------
df = spark.read.options(header='True', inferSchema='True').csv('/FileStore/tables/StudentData.csv')
df.show()
# COMMAND ----------
df = df.withColumn("roll", col("roll").cast("String"))
# COMMAND ----------
df.printSchema()
# COMMAND ----------
df = df.withColumn("marks", col('marks') + 10)
df.show()
# COMMAND ----------
df = df.withColumn("aggregated marks", col('marks') - 10)
df.show()
# COMMAND ----------
df = df.withColumn("name", lit("USA"))
df.show()
# COMMAND ----------
df.show()
# COMMAND ----------
df = df.withColumn("marks", col("marks") - 10).withColumn("updated marks", col("marks") + 20).withColumn("Country", lit("USA"))
# COMMAND ----------
df.show() | en | 0.150757 | # Databricks notebook source # COMMAND ---------- # COMMAND ---------- # COMMAND ---------- # COMMAND ---------- # COMMAND ---------- # COMMAND ---------- # COMMAND ---------- # COMMAND ---------- # COMMAND ---------- | 3.193572 | 3 |
sympy/physics/quantum/qft.py | shilpiprd/sympy | 8,323 | 6630403 | """An implementation of qubits and gates acting on them.
Todo:
* Update docstrings.
* Update tests.
* Implement apply using decompose.
* Implement represent using decompose or something smarter. For this to
work we first have to implement represent for SWAP.
* Decide if we want upper index to be inclusive in the constructor.
* Fix the printing of Rk gates in plotting.
"""
from sympy import Expr, Matrix, exp, I, pi, Integer, Symbol
from sympy.functions import sqrt
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qexpr import QuantumError, QExpr
from sympy.matrices import eye
from sympy.physics.quantum.tensorproduct import matrix_tensor_product
from sympy.physics.quantum.gate import (
Gate, HadamardGate, SwapGate, OneQubitGate, CGate, PhaseGate, TGate, ZGate
)
__all__ = [
'QFT',
'IQFT',
'RkGate',
'Rk'
]
#-----------------------------------------------------------------------------
# Fourier stuff
#-----------------------------------------------------------------------------
class RkGate(OneQubitGate):
    """This is the R_k gate of the QFT."""
gate_name = 'Rk'
gate_name_latex = 'R'
def __new__(cls, *args):
if len(args) != 2:
raise QuantumError(
'Rk gates only take two arguments, got: %r' % args
)
# For small k, Rk gates simplify to other gates, using these
# substitutions give us familiar results for the QFT for small numbers
# of qubits.
target = args[0]
k = args[1]
if k == 1:
return ZGate(target)
elif k == 2:
return PhaseGate(target)
elif k == 3:
return TGate(target)
args = cls._eval_args(args)
inst = Expr.__new__(cls, *args)
inst.hilbert_space = cls._eval_hilbert_space(args)
return inst
@classmethod
def _eval_args(cls, args):
# Fall back to this, because Gate._eval_args assumes that args is
# all targets and can't contain duplicates.
return QExpr._eval_args(args)
@property
def k(self):
return self.label[1]
@property
def targets(self):
return self.label[:1]
@property
def gate_name_plot(self):
return r'$%s_%s$' % (self.gate_name_latex, str(self.k))
def get_target_matrix(self, format='sympy'):
if format == 'sympy':
return Matrix([[1, 0], [0, exp(Integer(2)*pi*I/(Integer(2)**self.k))]])
raise NotImplementedError(
'Invalid format for the R_k gate: %r' % format)
Rk = RkGate
class Fourier(Gate):
"""Superclass of Quantum Fourier and Inverse Quantum Fourier Gates."""
@classmethod
def _eval_args(self, args):
if len(args) != 2:
raise QuantumError(
'QFT/IQFT only takes two arguments, got: %r' % args
)
if args[0] >= args[1]:
raise QuantumError("Start must be smaller than finish")
return Gate._eval_args(args)
def _represent_default_basis(self, **options):
return self._represent_ZGate(None, **options)
def _represent_ZGate(self, basis, **options):
"""
Represents the (I)QFT In the Z Basis
"""
nqubits = options.get('nqubits', 0)
if nqubits == 0:
raise QuantumError(
'The number of qubits must be given as nqubits.')
if nqubits < self.min_qubits:
raise QuantumError(
'The number of qubits %r is too small for the gate.' % nqubits
)
size = self.size
omega = self.omega
#Make a matrix that has the basic Fourier Transform Matrix
arrayFT = [[omega**(
i*j % size)/sqrt(size) for i in range(size)] for j in range(size)]
matrixFT = Matrix(arrayFT)
#Embed the FT Matrix in a higher space, if necessary
if self.label[0] != 0:
matrixFT = matrix_tensor_product(eye(2**self.label[0]), matrixFT)
if self.min_qubits < nqubits:
matrixFT = matrix_tensor_product(
matrixFT, eye(2**(nqubits - self.min_qubits)))
return matrixFT
@property
def targets(self):
return range(self.label[0], self.label[1])
@property
def min_qubits(self):
return self.label[1]
@property
def size(self):
"""Size is the size of the QFT matrix"""
return 2**(self.label[1] - self.label[0])
@property
def omega(self):
return Symbol('omega')
class QFT(Fourier):
"""The forward quantum Fourier transform."""
gate_name = 'QFT'
gate_name_latex = 'QFT'
def decompose(self):
"""Decomposes QFT into elementary gates."""
start = self.label[0]
finish = self.label[1]
circuit = 1
for level in reversed(range(start, finish)):
circuit = HadamardGate(level)*circuit
for i in range(level - start):
circuit = CGate(level - i - 1, RkGate(level, i + 2))*circuit
for i in range((finish - start)//2):
circuit = SwapGate(i + start, finish - i - 1)*circuit
return circuit
def _apply_operator_Qubit(self, qubits, **options):
return qapply(self.decompose()*qubits)
def _eval_inverse(self):
return IQFT(*self.args)
@property
def omega(self):
return exp(2*pi*I/self.size)
class IQFT(Fourier):
"""The inverse quantum Fourier transform."""
gate_name = 'IQFT'
gate_name_latex = '{QFT^{-1}}'
def decompose(self):
"""Decomposes IQFT into elementary gates."""
start = self.args[0]
finish = self.args[1]
circuit = 1
for i in range((finish - start)//2):
circuit = SwapGate(i + start, finish - i - 1)*circuit
for level in range(start, finish):
for i in reversed(range(level - start)):
circuit = CGate(level - i - 1, RkGate(level, -i - 2))*circuit
circuit = HadamardGate(level)*circuit
return circuit
def _eval_inverse(self):
return QFT(*self.args)
@property
def omega(self):
return exp(-2*pi*I/self.size)
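def _example_qft_decomposition():
    # Illustrative sketch, not part of the original module: decompose a QFT
    # acting on qubits 0, 1 and 2 into Hadamard, controlled-R_k and SWAP
    # gates. The result is a product of elementary gate objects.
    return QFT(0, 3).decompose()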
| """An implementation of qubits and gates acting on them.
Todo:
* Update docstrings.
* Update tests.
* Implement apply using decompose.
* Implement represent using decompose or something smarter. For this to
work we first have to implement represent for SWAP.
* Decide if we want upper index to be inclusive in the constructor.
* Fix the printing of Rk gates in plotting.
"""
from sympy import Expr, Matrix, exp, I, pi, Integer, Symbol
from sympy.functions import sqrt
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qexpr import QuantumError, QExpr
from sympy.matrices import eye
from sympy.physics.quantum.tensorproduct import matrix_tensor_product
from sympy.physics.quantum.gate import (
Gate, HadamardGate, SwapGate, OneQubitGate, CGate, PhaseGate, TGate, ZGate
)
__all__ = [
'QFT',
'IQFT',
'RkGate',
'Rk'
]
#-----------------------------------------------------------------------------
# Fourier stuff
#-----------------------------------------------------------------------------
class RkGate(OneQubitGate):
    """This is the R_k gate of the QFT."""
gate_name = 'Rk'
gate_name_latex = 'R'
def __new__(cls, *args):
if len(args) != 2:
raise QuantumError(
'Rk gates only take two arguments, got: %r' % args
)
# For small k, Rk gates simplify to other gates, using these
# substitutions give us familiar results for the QFT for small numbers
# of qubits.
target = args[0]
k = args[1]
if k == 1:
return ZGate(target)
elif k == 2:
return PhaseGate(target)
elif k == 3:
return TGate(target)
args = cls._eval_args(args)
inst = Expr.__new__(cls, *args)
inst.hilbert_space = cls._eval_hilbert_space(args)
return inst
@classmethod
def _eval_args(cls, args):
# Fall back to this, because Gate._eval_args assumes that args is
# all targets and can't contain duplicates.
return QExpr._eval_args(args)
@property
def k(self):
return self.label[1]
@property
def targets(self):
return self.label[:1]
@property
def gate_name_plot(self):
return r'$%s_%s$' % (self.gate_name_latex, str(self.k))
def get_target_matrix(self, format='sympy'):
if format == 'sympy':
return Matrix([[1, 0], [0, exp(Integer(2)*pi*I/(Integer(2)**self.k))]])
raise NotImplementedError(
'Invalid format for the R_k gate: %r' % format)
Rk = RkGate
class Fourier(Gate):
"""Superclass of Quantum Fourier and Inverse Quantum Fourier Gates."""
@classmethod
def _eval_args(self, args):
if len(args) != 2:
raise QuantumError(
'QFT/IQFT only takes two arguments, got: %r' % args
)
if args[0] >= args[1]:
raise QuantumError("Start must be smaller than finish")
return Gate._eval_args(args)
def _represent_default_basis(self, **options):
return self._represent_ZGate(None, **options)
def _represent_ZGate(self, basis, **options):
"""
Represents the (I)QFT In the Z Basis
"""
nqubits = options.get('nqubits', 0)
if nqubits == 0:
raise QuantumError(
'The number of qubits must be given as nqubits.')
if nqubits < self.min_qubits:
raise QuantumError(
'The number of qubits %r is too small for the gate.' % nqubits
)
size = self.size
omega = self.omega
#Make a matrix that has the basic Fourier Transform Matrix
arrayFT = [[omega**(
i*j % size)/sqrt(size) for i in range(size)] for j in range(size)]
matrixFT = Matrix(arrayFT)
#Embed the FT Matrix in a higher space, if necessary
if self.label[0] != 0:
matrixFT = matrix_tensor_product(eye(2**self.label[0]), matrixFT)
if self.min_qubits < nqubits:
matrixFT = matrix_tensor_product(
matrixFT, eye(2**(nqubits - self.min_qubits)))
return matrixFT
@property
def targets(self):
return range(self.label[0], self.label[1])
@property
def min_qubits(self):
return self.label[1]
@property
def size(self):
"""Size is the size of the QFT matrix"""
return 2**(self.label[1] - self.label[0])
@property
def omega(self):
return Symbol('omega')
class QFT(Fourier):
"""The forward quantum Fourier transform."""
gate_name = 'QFT'
gate_name_latex = 'QFT'
def decompose(self):
"""Decomposes QFT into elementary gates."""
start = self.label[0]
finish = self.label[1]
circuit = 1
for level in reversed(range(start, finish)):
circuit = HadamardGate(level)*circuit
for i in range(level - start):
circuit = CGate(level - i - 1, RkGate(level, i + 2))*circuit
for i in range((finish - start)//2):
circuit = SwapGate(i + start, finish - i - 1)*circuit
return circuit
def _apply_operator_Qubit(self, qubits, **options):
return qapply(self.decompose()*qubits)
def _eval_inverse(self):
return IQFT(*self.args)
@property
def omega(self):
return exp(2*pi*I/self.size)
class IQFT(Fourier):
"""The inverse quantum Fourier transform."""
gate_name = 'IQFT'
gate_name_latex = '{QFT^{-1}}'
def decompose(self):
"""Decomposes IQFT into elementary gates."""
start = self.args[0]
finish = self.args[1]
circuit = 1
for i in range((finish - start)//2):
circuit = SwapGate(i + start, finish - i - 1)*circuit
for level in range(start, finish):
for i in reversed(range(level - start)):
circuit = CGate(level - i - 1, RkGate(level, -i - 2))*circuit
circuit = HadamardGate(level)*circuit
return circuit
def _eval_inverse(self):
return QFT(*self.args)
@property
def omega(self):
return exp(-2*pi*I/self.size)
| en | 0.774068 | An implementation of qubits and gates acting on them. Todo: * Update docstrings. * Update tests. * Implement apply using decompose. * Implement represent using decompose or something smarter. For this to work we first have to implement represent for SWAP. * Decide if we want upper index to be inclusive in the constructor. * Fix the printing of Rk gates in plotting. #----------------------------------------------------------------------------- # Fourier stuff #----------------------------------------------------------------------------- This is the R_k gate of the QTF. # For small k, Rk gates simplify to other gates, using these # substitutions give us familiar results for the QFT for small numbers # of qubits. # Fall back to this, because Gate._eval_args assumes that args is # all targets and can't contain duplicates. Superclass of Quantum Fourier and Inverse Quantum Fourier Gates. Represents the (I)QFT In the Z Basis #Make a matrix that has the basic Fourier Transform Matrix #Embed the FT Matrix in a higher space, if necessary Size is the size of the QFT matrix The forward quantum Fourier transform. Decomposes QFT into elementary gates. The inverse quantum Fourier transform. Decomposes IQFT into elementary gates. | 3.288841 | 3 |
bulk_manager_django/BulkManager.py | roodrepo/bulk_manager_django | 0 | 6630404 | <reponame>roodrepo/bulk_manager_django
from django.db import connection
from django.db.models.query import QuerySet
from typing import Any, Tuple, TypeVar, Union, Optional
from default_mutable.DefaultMutable import defaultMutable
TypeClass = TypeVar('TypeClass')
class BulkManager:
_fields : dict = {}
_objects : dict = {}
_classes : dict = {}
_deletes : dict = {}
_creates : dict = {}
_mapping_obj_table_name : dict = {}
_table_order_delete : list = []
@defaultMutable
def delete(self, listObj : list = []) -> None:
'''
Performing delete according to the list passed
'''
# If the list is empty, we delete everything in the default order (FIFO)
if len(listObj) == 0:
listObj = self._table_order_delete
else:
listObj = self._getTablesNameFromList(listObj)
self._setDeleteOrderFromTablesName(listObj)
query_update_set_null = ''
query_delete = []
# Looping all the tables from the ordered list
for _table_name in self._table_order_delete:
# Checking if table is in the current list passed to delete
if _table_name in listObj:
if _table_name in self._deletes:
query_delete.append(f'DELETE FROM {_table_name} WHERE {self._deletes[_table_name]["pk_field"]} IN ({", ".join(self._deletes[_table_name]["ids"])})')
del self._deletes[_table_name]
final_query = query_update_set_null + ';'.join(query_delete)
if final_query != '':
with connection.cursor() as cursor:
cursor.execute(final_query)
def prepareDelete(self, obj: Union[TypeClass, QuerySet]) -> None:
'''
        Keeping in memory all the parameters to perform a bulk delete later on
:param obj: django object to delete or a QuerySet to delete
'''
if isinstance(obj, QuerySet):
for record in obj:
self.prepareDelete(record)
else:
tablename = obj._meta.db_table
pk_name = obj._meta.pk.name
if tablename not in self._deletes:
self._mapping_obj_table_name[obj.__class__.__name__] = tablename
self._table_order_delete.append(tablename)
self._deletes[tablename] = {
'pk_field' : pk_name,
'ids' : [],
}
self._deletes[tablename]['ids'].append(str(getattr(obj, pk_name)))
def getValueFromMemory(self, obj: TypeClass, attr: Optional[str] = None, default_value: Union[str, int, float, bool, None] = None) -> Union[str, int, float, bool, None]:
'''
Access the value of an object previously updated
'''
if default_value == None and isinstance(getattr(obj, attr), (int, float)) == True:
default_value = 0
pk_value = getattr(obj, obj._meta.pk.name)
class_name = obj.__class__.__name__
if class_name in self._objects and pk_value in self._objects[class_name]:
# Returning the value
if attr is not None and hasattr(self._objects[class_name][pk_value], attr):
return getattr(self._objects[class_name][pk_value], attr)
# Returning the actual object
elif attr is None:
return self._objects[class_name][pk_value]
return default_value
def set(self, obj: TypeClass, attr: Union[str, list], value: Any) -> None:
'''
        Set a value to update
'''
if type(attr) == str:
attr = attr.split('.')
up_obj, is_updated, objclass = self._deepSetAttr(obj, attr, value)
classname = objclass.__name__
cur_field = attr[-1]
if is_updated == True:
pk_value = getattr(obj, up_obj._meta.pk.name)
if classname not in self._fields:
self._fields[classname] = []
self._objects[classname] = {}
self._classes[classname] = objclass
if cur_field not in self._fields[classname]:
self._fields[classname].append(cur_field)
if pk_value not in self._objects[classname]:
self._objects[classname][pk_value] = up_obj
else:
# Only updating the current value
setattr(self._objects[classname][pk_value], attr[0], value)
@defaultMutable
def update(self, listObj: list = []) -> None:
'''
Perform bulk update
'''
if len(listObj) == 0:
listObj = list(self._fields.keys())
exception = None
for obj in listObj:
if obj in self._objects:
_listUpdateObj = list(self._objects[obj].values())
try:
self._classes[obj].objects.bulk_update(_listUpdateObj, self._fields[obj])
except Exception as e:
# In case the bulk threw an error, we update the objects one by one to avoid data loss
for __objToUpdate in _listUpdateObj:
try:
__objToUpdate.save()
except Exception as e:
exception= f'Error bulk create: {str(e)}'
del self._fields[obj]
del self._objects[obj]
if obj not in self._creates:
del self._classes[obj]
if exception is not None:
raise Exception(exception)
def prepareCreate(self, obj: Union[TypeClass, list]) -> None:
'''
Prepare the list of all objects to create in bulk
'''
if isinstance(obj, (list, set)):
for _obj in obj:
self._prepareCreateObj(_obj)
else:
self._prepareCreateObj(obj)
def _prepareCreateObj(self, obj: TypeClass) -> None:
classname = obj.__class__.__name__
if classname not in self._creates:
self._creates[classname] = []
self._classes[classname] = obj.__class__
self._creates[classname].append(obj)
@defaultMutable
def create(self, listObj: list = []) -> None:
'''
Perform bulk create
'''
if len(listObj) == 0:
listObj = list(self._creates.keys())
exception = None
for obj in listObj:
try:
self._classes[obj].objects.bulk_create(self._creates[obj])
except Exception as e:
# In case the bulk threw an error, we update the objects one by one to avoid data loss
for __objToCreate in self._creates[obj]:
try:
__objToCreate.save()
except Exception as e:
exception= f'Error bulk create: {str(e)}'
del self._creates[obj]
if obj not in self._fields:
del self._classes[obj]
if exception is not None:
raise Exception(exception)
@defaultMutable
def execute(self, create_order: list= [], delete_order: list= []) -> None:
'''
Perform all the pending operations
'''
self.create(create_order)
self.update()
self.delete(delete_order)
@defaultMutable
def _setDeleteOrderFromObjName(self, _list: list = []) -> None:
'''
Setting the order of deletion to avoid relationship constraint issues
'''
current_list = self._table_order_delete
self._table_order_delete = []
for _obj in _list:
_table_name = self._mapping_obj_table_name[_obj]
self._table_order_delete.append(_table_name)
current_list.remove(_table_name)
self._table_order_delete += current_list
@defaultMutable
def _setDeleteOrderFromTablesName(self, _list: list = []) -> None:
'''
Setting the order of deletion to avoid relationship constraint issues
'''
current_list = self._table_order_delete
self._table_order_delete = []
for _table_name in _list:
self._table_order_delete.append(_table_name)
if _table_name in current_list:
current_list.remove(_table_name)
self._table_order_delete += current_list
def _getTablesNameFromList(self, _list: list) -> list:
'''
        Return the list of table names, resolving any model names present in the list
'''
response = []
for item in _list:
if item in self._mapping_obj_table_name:
response.append(self._mapping_obj_table_name[item])
else:
response.append(item)
return response
def _deepSetAttr(self, obj: TypeClass, attr: Union[str, list], value: Any) -> Tuple[TypeClass, bool, TypeClass]:
'''
Update a value from a multi level object
'''
if type(attr) == str:
attr = attr.split('.')
cur_obj = obj
objclass = None
is_updated = False
for _attr in attr:
if _attr == attr[-1]:
objclass = cur_obj.__class__
cur_value = getattr(cur_obj, _attr)
if cur_value != value:
setattr(cur_obj, _attr, value)
is_updated = True
else:
cur_obj = getattr(cur_obj, _attr)
return cur_obj, is_updated, objclass | from django.db import connection
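def _example_bulk_workflow(order, deletable_items):
    # Hypothetical usage sketch, not part of the original module: queue a field
    # update and some deletions, then flush everything in one round of bulk
    # queries. `order` and `deletable_items` stand for arbitrary Django model
    # instances / querysets, and the 'status' field is assumed to exist.
    manager = BulkManager()
    manager.set(order, 'status', 'shipped')
    manager.prepareDelete(deletable_items)
    manager.execute()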
from django.db.models.query import QuerySet
from typing import Any, Tuple, TypeVar, Union, Optional
from default_mutable.DefaultMutable import defaultMutable
TypeClass = TypeVar('TypeClass')
class BulkManager:
_fields : dict = {}
_objects : dict = {}
_classes : dict = {}
_deletes : dict = {}
_creates : dict = {}
_mapping_obj_table_name : dict = {}
_table_order_delete : list = []
@defaultMutable
def delete(self, listObj : list = []) -> None:
'''
Performing delete according to the list passed
'''
# If the list is empty, we delete everything in the default order (FIFO)
if len(listObj) == 0:
listObj = self._table_order_delete
else:
listObj = self._getTablesNameFromList(listObj)
self._setDeleteOrderFromTablesName(listObj)
query_update_set_null = ''
query_delete = []
# Looping all the tables from the ordered list
for _table_name in self._table_order_delete:
# Checking if table is in the current list passed to delete
if _table_name in listObj:
if _table_name in self._deletes:
query_delete.append(f'DELETE FROM {_table_name} WHERE {self._deletes[_table_name]["pk_field"]} IN ({", ".join(self._deletes[_table_name]["ids"])})')
del self._deletes[_table_name]
final_query = query_update_set_null + ';'.join(query_delete)
if final_query != '':
with connection.cursor() as cursor:
cursor.execute(final_query)
def prepareDelete(self, obj: Union[TypeClass, QuerySet]) -> None:
'''
        Keeping in memory all the parameters to perform a bulk delete later on
:param obj: django object to delete or a QuerySet to delete
'''
if isinstance(obj, QuerySet):
for record in obj:
self.prepareDelete(record)
else:
tablename = obj._meta.db_table
pk_name = obj._meta.pk.name
if tablename not in self._deletes:
self._mapping_obj_table_name[obj.__class__.__name__] = tablename
self._table_order_delete.append(tablename)
self._deletes[tablename] = {
'pk_field' : pk_name,
'ids' : [],
}
self._deletes[tablename]['ids'].append(str(getattr(obj, pk_name)))
def getValueFromMemory(self, obj: TypeClass, attr: Optional[str] = None, default_value: Union[str, int, float, bool, None] = None) -> Union[str, int, float, bool, None]:
'''
Access the value of an object previously updated
'''
if default_value == None and isinstance(getattr(obj, attr), (int, float)) == True:
default_value = 0
pk_value = getattr(obj, obj._meta.pk.name)
class_name = obj.__class__.__name__
if class_name in self._objects and pk_value in self._objects[class_name]:
# Returning the value
if attr is not None and hasattr(self._objects[class_name][pk_value], attr):
return getattr(self._objects[class_name][pk_value], attr)
# Returning the actual object
elif attr is None:
return self._objects[class_name][pk_value]
return default_value
def set(self, obj: TypeClass, attr: Union[str, list], value: Any) -> None:
'''
        Set a value to update
'''
if type(attr) == str:
attr = attr.split('.')
up_obj, is_updated, objclass = self._deepSetAttr(obj, attr, value)
classname = objclass.__name__
cur_field = attr[-1]
if is_updated == True:
pk_value = getattr(obj, up_obj._meta.pk.name)
if classname not in self._fields:
self._fields[classname] = []
self._objects[classname] = {}
self._classes[classname] = objclass
if cur_field not in self._fields[classname]:
self._fields[classname].append(cur_field)
if pk_value not in self._objects[classname]:
self._objects[classname][pk_value] = up_obj
else:
# Only updating the current value
setattr(self._objects[classname][pk_value], attr[0], value)
@defaultMutable
def update(self, listObj: list = []) -> None:
'''
Perform bulk update
'''
if len(listObj) == 0:
listObj = list(self._fields.keys())
exception = None
for obj in listObj:
if obj in self._objects:
_listUpdateObj = list(self._objects[obj].values())
try:
self._classes[obj].objects.bulk_update(_listUpdateObj, self._fields[obj])
except Exception as e:
# In case the bulk threw an error, we update the objects one by one to avoid data loss
for __objToUpdate in _listUpdateObj:
try:
__objToUpdate.save()
except Exception as e:
exception= f'Error bulk create: {str(e)}'
del self._fields[obj]
del self._objects[obj]
if obj not in self._creates:
del self._classes[obj]
if exception is not None:
raise Exception(exception)
def prepareCreate(self, obj: Union[TypeClass, list]) -> None:
'''
Prepare the list of all objects to create in bulk
'''
if isinstance(obj, (list, set)):
for _obj in obj:
self._prepareCreateObj(_obj)
else:
self._prepareCreateObj(obj)
def _prepareCreateObj(self, obj: TypeClass) -> None:
classname = obj.__class__.__name__
if classname not in self._creates:
self._creates[classname] = []
self._classes[classname] = obj.__class__
self._creates[classname].append(obj)
@defaultMutable
def create(self, listObj: list = []) -> None:
'''
Perform bulk create
'''
if len(listObj) == 0:
listObj = list(self._creates.keys())
exception = None
for obj in listObj:
try:
self._classes[obj].objects.bulk_create(self._creates[obj])
except Exception as e:
                # In case the bulk threw an error, we save the objects one by one to avoid data loss
for __objToCreate in self._creates[obj]:
try:
__objToCreate.save()
except Exception as e:
                        exception = f'Error bulk create: {str(e)}'
del self._creates[obj]
if obj not in self._fields:
del self._classes[obj]
if exception is not None:
raise Exception(exception)
@defaultMutable
def execute(self, create_order: list= [], delete_order: list= []) -> None:
'''
Perform all the pending operations
'''
self.create(create_order)
self.update()
self.delete(delete_order)
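    # Hedged end-to-end sketch (object names hypothetical): execute() simply chains
    # the three bulk phases in a safe order: creates, then updates, then deletes.
    #
    #   bulk.prepareCreate(new_rows)
    #   bulk.set(existing_row, 'counter', 42)
    #   bulk.prepareDelete(stale_rows)
    #   bulk.execute()   # bulk_create -> bulk_update -> raw DELETE statements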
@defaultMutable
def _setDeleteOrderFromObjName(self, _list: list = []) -> None:
'''
Setting the order of deletion to avoid relationship constraint issues
'''
current_list = self._table_order_delete
self._table_order_delete = []
for _obj in _list:
_table_name = self._mapping_obj_table_name[_obj]
self._table_order_delete.append(_table_name)
current_list.remove(_table_name)
self._table_order_delete += current_list
@defaultMutable
def _setDeleteOrderFromTablesName(self, _list: list = []) -> None:
'''
Setting the order of deletion to avoid relationship constraint issues
'''
current_list = self._table_order_delete
self._table_order_delete = []
for _table_name in _list:
self._table_order_delete.append(_table_name)
if _table_name in current_list:
current_list.remove(_table_name)
self._table_order_delete += current_list
def _getTablesNameFromList(self, _list: list) -> list:
'''
            Return the list of table names, resolving any Django model names that appear in the given list
'''
response = []
for item in _list:
if item in self._mapping_obj_table_name:
response.append(self._mapping_obj_table_name[item])
else:
response.append(item)
return response
def _deepSetAttr(self, obj: TypeClass, attr: Union[str, list], value: Any) -> Tuple[TypeClass, bool, TypeClass]:
'''
            Update a value on a multi-level (dotted) attribute path of an object
'''
if type(attr) == str:
attr = attr.split('.')
cur_obj = obj
objclass = None
is_updated = False
for _attr in attr:
if _attr == attr[-1]:
objclass = cur_obj.__class__
cur_value = getattr(cur_obj, _attr)
if cur_value != value:
setattr(cur_obj, _attr, value)
is_updated = True
else:
cur_obj = getattr(cur_obj, _attr)
return cur_obj, is_updated, objclass | en | 0.83998 | Performing delete according to the list passed # If the list is empty, we delete everything in the default order (FIFO) # Looping all the tables from the ordered list # Checking if table is in the current list passed to delete Kepping in memory all the parameters to perform a bulk delete later on :param obj: django object to delete or a QuerySet to delete Access the value of an object previously updated # Returning the value # Returning the actual object Set a values to update # Only updating the current value Perform bulk update # In case the bulk threw an error, we update the objects one by one to avoid data loss Prepare the list of all objects to create in bulk Perform bulk create # In case the bulk threw an error, we update the objects one by one to avoid data loss Perform all the pending operations Setting the order of deletion to avoid relationship constraint issues Setting the order of deletion to avoid relationship constraint issues returning the list with the tables name in case there is the Model name in the list Update a value from a multi level object | 2.220936 | 2 |
cert_manager/_certificates.py | arcovangeest/python-cert_manager | 0 | 6630405 | <filename>cert_manager/_certificates.py
# -*- coding: utf-8 -*-
"""Define the cert_manager._certificate.Certificates base class."""
import logging
from requests.exceptions import HTTPError
from ._helpers import Pending
from ._endpoint import Endpoint
LOGGER = logging.getLogger(__name__)
class Certificates(Endpoint):
"""Act as a superclass for all certificate-related classes.
This is due to the fact that several of the API endpoints have almost identical functions, so this allows code
to be shared.
"""
valid_formats = [
"x509", # for X509, Base64 encoded
"x509CO", # for X509 Certificate only, Base64 encoded
"x509IO", # for X509 Intermediates/root only, Base64 encoded
"base64", # for PKCS#7 Base64 encoded,
"bin", # for PKCS#7 Bin encoded
"x509IOR", # for X509 Intermediates/root only Reverse, Base64 encoded
]
def __init__(self, client, endpoint, api_version="v1"):
"""Initialize the class.
:param object client: An instantiated cert_manager.Client object
:param string endpoint: The URL of the API endpoint (ex. "/ssl")
:param string api_version: The API version to use; the default is "v1"
"""
super().__init__(client=client, endpoint=endpoint, api_version=api_version)
# Set to None initially. Will be filled in by methods later.
self.__cert_types = None
self.__custom_fields = None
@property
def types(self):
"""Retrieve all certificate types that are currently available.
:return list: A list of dictionaries of certificate types
"""
# Only go to the API if we haven't done the API call yet, or if someone
# specifically wants to refresh the internal cache
if not self.__cert_types:
url = self._url("/types")
result = self._client.get(url)
# Build a dictionary instead of a flat list of dictionaries
self.__cert_types = {}
for res in result.json():
name = res["name"]
self.__cert_types[name] = {}
self.__cert_types[name]["id"] = res["id"]
self.__cert_types[name]["terms"] = res["terms"]
return self.__cert_types
@property
def custom_fields(self):
"""Retrieve all custom fields defined for SSL certificates.
:return list: A list of dictionaries of custom fields
"""
# Only go to the API if we haven't done the API call yet, or if someone
# specifically wants to refresh the internal cache
if not self.__custom_fields:
url = self._url("/customFields")
result = self._client.get(url)
self.__custom_fields = result.json()
return self.__custom_fields
def _validate_custom_fields(self, custom_fields):
"""Check the structure and contents of a list of dicts representing custom fields
Raise exceptions if validation fails
:raises Exception: if any of the validation steps fail
"""
# Make sure all custom fields are valid if present
custom_field_names = [f['name'] for f in self.custom_fields]
for custom_field in custom_fields:
if not isinstance(custom_field, dict):
msg = "Values in the custom_fields list must be dictionaries, not {}"
raise Exception(msg.format(type(custom_field)))
if not ('name' in custom_field and 'value' in custom_field):
raise Exception(
"Dictionaries in the custom_fields list must contain both a 'name' key and 'value' key"
)
if custom_field.get('name') not in custom_field_names:
msg = "Custom field {} not defined for your account. defined custom fields are {}"
raise Exception(msg.format(custom_field.get('name'), custom_field_names))
mandatory_fields = [f['name'] for f in self.custom_fields if f['mandatory'] is True]
for field_name in mandatory_fields:
# for each mandatory field, there should be exactly one dict in the custom_fields list
# whose name matches that mandatory field name
matching_fields = [f for f in custom_fields if f['name'] == field_name]
if len(matching_fields) < 1:
raise Exception("Missing mandatory custom field {}".format(field_name))
if len(matching_fields) > 1:
raise Exception("Too many custom field objects with name {}".format(field_name))
def collect(self, cert_id, cert_format):
"""Retrieve an existing certificate from the API.
This method will raise a Pending exception if the certificate is still in a pending state.
:param int cert_id: The certificate ID
        :param str cert_format: The format in which to retrieve the certificate. Allowed values: *self.valid_formats*
:return str: the string representing the certificate in the requested format
"""
if cert_format not in self.valid_formats:
raise Exception("Invalid cert format %s provided" % cert_format)
url = self._url("/collect/{}/{}".format(cert_id, cert_format))
try:
result = self._client.get(url)
except HTTPError as exc:
raise Pending("certificate %d still in 'pending' state" % cert_id) from exc
# The certificate is ready for collection
return result.content.decode(result.encoding)
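    # Hedged usage sketch: collect() raises Pending until Sectigo has issued the
    # certificate, so callers typically poll (identifiers below are illustrative):
    #
    #   try:
    #       pem = ssl_endpoint.collect(cert_id, "x509CO")
    #   except Pending:
    #       time.sleep(30)  # retry later; requires `import time` in the caller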
def enroll(self, **kwargs):
"""Enroll a certificate request with Sectigo to generate a certificate.
:param string cert_type_name: The full cert type name
        Note: the name must match the names returned by the `types` property
:param string csr: The Certificate Signing Request (CSR)
:param int term: The length, in days, for the certificate to be issued
:param int org_id: The ID of the organization in which to enroll the certificate
:param list subject_alt_names: A list of Subject Alternative Names
:param list external_requester: One or more e-mail addresses
:param list custom_fields: zero or more objects representing custom fields and their values
Note: each object must have a 'name' key and a 'value' key
:return dict: The certificate_id and the normal status messages for errors
"""
# Retrieve all the arguments
cert_type_name = kwargs.get("cert_type_name")
csr = kwargs.get("csr")
term = kwargs.get("term")
org_id = kwargs.get("org_id")
subject_alt_names = kwargs.get("subject_alt_names", None)
external_requester = kwargs.get("external_requester", None)
custom_fields = kwargs.get("custom_fields", list())
# Make sure a valid certificate type name was provided
if cert_type_name not in self.types:
raise Exception("Incorrect certificate type specified: '{}'".format(cert_type_name))
type_id = self.types[cert_type_name]["id"]
terms = self.types[cert_type_name]["terms"]
# Make sure a valid term is specified
if term not in terms:
# You have to do the list/map/str thing because join can only operate on
# a list of strings, and this will be a list of numbers
trm = ", ".join(list(map(str, terms)))
raise Exception("Incorrect term specified: {}. Valid terms are {}.".format(term, trm))
self._validate_custom_fields(custom_fields)
url = self._url("/enroll")
data = {
"orgId": org_id, "csr": csr.rstrip(), "subjAltNames": subject_alt_names, "certType": type_id,
"numberServers": 1, "serverType": -1, "term": term, "comments": "Enrolled by %s" % self._client.user_agent,
"externalRequester": external_requester
}
if custom_fields:
data['customFields'] = custom_fields
result = self._client.post(url, data=data)
return result.json()
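    # Hedged enrollment sketch (org_id and the cert type name are illustrative values
    # that must exist on the account; csr_pem is a PEM-encoded CSR string):
    #
    #   result = ssl_endpoint.enroll(cert_type_name="InCommon SSL (SHA-2)",
    #                                csr=csr_pem, term=365, org_id=1234)
    #   cert_id = result["sslId"]  # field name assumed from typical API responses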
def renew(self, cert_id):
"""Renew the certificate specified by the certificate ID.
:param int cert_id: The certificate ID
:return dict: The renewal result. "Successful" on success
"""
url = self._url("/renewById/{}".format(cert_id))
result = self._client.post(url, data="")
return result.json()
def replace(self, **kwargs):
"""Replace a pre-existing certificate.
:param int cert_id: The certificate ID
:param string csr: The Certificate Signing Request (CSR)
:param string common_name: Certificate common name.
:param str reason: Reason for replacement (up to 512 characters), can be blank: "", but must exist.
:param string subject_alt_names: Subject Alternative Names separated by a ",".
:return: The result of the operation, "Successful" on success
:rtype: dict
"""
# Retrieve all the arguments
cert_id = kwargs.get("cert_id")
csr = kwargs.get("csr")
common_name = kwargs.get("common_name")
reason = kwargs.get("reason")
subject_alt_names = kwargs.get("subject_alt_names", None)
url = self._url("/replace/{}".format(cert_id))
data = {"csr": csr, "commonName": common_name, "subjectAlternativeNames": subject_alt_names, "reason": reason}
result = self._client.post(url, data=data)
return result.json()
def revoke(self, cert_id, reason=""):
"""Revoke the certificate specified by the certificate ID.
:param int cert_id: The certificate ID
:param str reason: The Reason for revocation.
Reason can be up to 512 characters and cannot be blank (i.e. empty string)
:return dict: The revocation result. "Successful" on success
"""
url = self._url("/revoke/{}".format(cert_id))
# Sectigo has a 512 character limit on the "reason" message, so catch that here.
if (not reason) or (len(reason) > 511):
raise Exception("Sectigo limit: reason must be > 0 character and < 512 characters")
data = {"reason": reason}
result = self._client.post(url, data=data)
return result.json()
| <filename>cert_manager/_certificates.py
# -*- coding: utf-8 -*-
"""Define the cert_manager._certificate.Certificates base class."""
import logging
from requests.exceptions import HTTPError
from ._helpers import Pending
from ._endpoint import Endpoint
LOGGER = logging.getLogger(__name__)
class Certificates(Endpoint):
"""Act as a superclass for all certificate-related classes.
This is due to the fact that several of the API endpoints have almost identical functions, so this allows code
to be shared.
"""
valid_formats = [
"x509", # for X509, Base64 encoded
"x509CO", # for X509 Certificate only, Base64 encoded
"x509IO", # for X509 Intermediates/root only, Base64 encoded
"base64", # for PKCS#7 Base64 encoded,
"bin", # for PKCS#7 Bin encoded
"x509IOR", # for X509 Intermediates/root only Reverse, Base64 encoded
]
def __init__(self, client, endpoint, api_version="v1"):
"""Initialize the class.
:param object client: An instantiated cert_manager.Client object
:param string endpoint: The URL of the API endpoint (ex. "/ssl")
:param string api_version: The API version to use; the default is "v1"
"""
super().__init__(client=client, endpoint=endpoint, api_version=api_version)
# Set to None initially. Will be filled in by methods later.
self.__cert_types = None
self.__custom_fields = None
@property
def types(self):
"""Retrieve all certificate types that are currently available.
:return list: A list of dictionaries of certificate types
"""
# Only go to the API if we haven't done the API call yet, or if someone
# specifically wants to refresh the internal cache
if not self.__cert_types:
url = self._url("/types")
result = self._client.get(url)
# Build a dictionary instead of a flat list of dictionaries
self.__cert_types = {}
for res in result.json():
name = res["name"]
self.__cert_types[name] = {}
self.__cert_types[name]["id"] = res["id"]
self.__cert_types[name]["terms"] = res["terms"]
return self.__cert_types
@property
def custom_fields(self):
"""Retrieve all custom fields defined for SSL certificates.
:return list: A list of dictionaries of custom fields
"""
# Only go to the API if we haven't done the API call yet, or if someone
# specifically wants to refresh the internal cache
if not self.__custom_fields:
url = self._url("/customFields")
result = self._client.get(url)
self.__custom_fields = result.json()
return self.__custom_fields
def _validate_custom_fields(self, custom_fields):
"""Check the structure and contents of a list of dicts representing custom fields
Raise exceptions if validation fails
:raises Exception: if any of the validation steps fail
"""
# Make sure all custom fields are valid if present
custom_field_names = [f['name'] for f in self.custom_fields]
for custom_field in custom_fields:
if not isinstance(custom_field, dict):
msg = "Values in the custom_fields list must be dictionaries, not {}"
raise Exception(msg.format(type(custom_field)))
if not ('name' in custom_field and 'value' in custom_field):
raise Exception(
"Dictionaries in the custom_fields list must contain both a 'name' key and 'value' key"
)
if custom_field.get('name') not in custom_field_names:
msg = "Custom field {} not defined for your account. defined custom fields are {}"
raise Exception(msg.format(custom_field.get('name'), custom_field_names))
mandatory_fields = [f['name'] for f in self.custom_fields if f['mandatory'] is True]
for field_name in mandatory_fields:
# for each mandatory field, there should be exactly one dict in the custom_fields list
# whose name matches that mandatory field name
matching_fields = [f for f in custom_fields if f['name'] == field_name]
if len(matching_fields) < 1:
raise Exception("Missing mandatory custom field {}".format(field_name))
if len(matching_fields) > 1:
raise Exception("Too many custom field objects with name {}".format(field_name))
def collect(self, cert_id, cert_format):
"""Retrieve an existing certificate from the API.
This method will raise a Pending exception if the certificate is still in a pending state.
:param int cert_id: The certificate ID
:param str cert_format: The format in which to retreive the certificate. Allowed values: *self.valid_formats*
:return str: the string representing the certificate in the requested format
"""
if cert_format not in self.valid_formats:
raise Exception("Invalid cert format %s provided" % cert_format)
url = self._url("/collect/{}/{}".format(cert_id, cert_format))
try:
result = self._client.get(url)
except HTTPError as exc:
raise Pending("certificate %d still in 'pending' state" % cert_id) from exc
# The certificate is ready for collection
return result.content.decode(result.encoding)
def enroll(self, **kwargs):
"""Enroll a certificate request with Sectigo to generate a certificate.
:param string cert_type_name: The full cert type name
Note: the name must match names returned from the get_types() method
:param string csr: The Certificate Signing Request (CSR)
:param int term: The length, in days, for the certificate to be issued
:param int org_id: The ID of the organization in which to enroll the certificate
:param list subject_alt_names: A list of Subject Alternative Names
:param list external_requester: One or more e-mail addresses
:param list custom_fields: zero or more objects representing custom fields and their values
Note: each object must have a 'name' key and a 'value' key
:return dict: The certificate_id and the normal status messages for errors
"""
# Retrieve all the arguments
cert_type_name = kwargs.get("cert_type_name")
csr = kwargs.get("csr")
term = kwargs.get("term")
org_id = kwargs.get("org_id")
subject_alt_names = kwargs.get("subject_alt_names", None)
external_requester = kwargs.get("external_requester", None)
custom_fields = kwargs.get("custom_fields", list())
# Make sure a valid certificate type name was provided
if cert_type_name not in self.types:
raise Exception("Incorrect certificate type specified: '{}'".format(cert_type_name))
type_id = self.types[cert_type_name]["id"]
terms = self.types[cert_type_name]["terms"]
# Make sure a valid term is specified
if term not in terms:
# You have to do the list/map/str thing because join can only operate on
# a list of strings, and this will be a list of numbers
trm = ", ".join(list(map(str, terms)))
raise Exception("Incorrect term specified: {}. Valid terms are {}.".format(term, trm))
self._validate_custom_fields(custom_fields)
url = self._url("/enroll")
data = {
"orgId": org_id, "csr": csr.rstrip(), "subjAltNames": subject_alt_names, "certType": type_id,
"numberServers": 1, "serverType": -1, "term": term, "comments": "Enrolled by %s" % self._client.user_agent,
"externalRequester": external_requester
}
if custom_fields:
data['customFields'] = custom_fields
result = self._client.post(url, data=data)
return result.json()
def renew(self, cert_id):
"""Renew the certificate specified by the certificate ID.
:param int cert_id: The certificate ID
:return dict: The renewal result. "Successful" on success
"""
url = self._url("/renewById/{}".format(cert_id))
result = self._client.post(url, data="")
return result.json()
def replace(self, **kwargs):
"""Replace a pre-existing certificate.
:param int cert_id: The certificate ID
:param string csr: The Certificate Signing Request (CSR)
:param string common_name: Certificate common name.
:param str reason: Reason for replacement (up to 512 characters), can be blank: "", but must exist.
:param string subject_alt_names: Subject Alternative Names separated by a ",".
:return: The result of the operation, "Successful" on success
:rtype: dict
"""
# Retrieve all the arguments
cert_id = kwargs.get("cert_id")
csr = kwargs.get("csr")
common_name = kwargs.get("common_name")
reason = kwargs.get("reason")
subject_alt_names = kwargs.get("subject_alt_names", None)
url = self._url("/replace/{}".format(cert_id))
data = {"csr": csr, "commonName": common_name, "subjectAlternativeNames": subject_alt_names, "reason": reason}
result = self._client.post(url, data=data)
return result.json()
def revoke(self, cert_id, reason=""):
"""Revoke the certificate specified by the certificate ID.
:param int cert_id: The certificate ID
:param str reason: The Reason for revocation.
Reason can be up to 512 characters and cannot be blank (i.e. empty string)
:return dict: The revocation result. "Successful" on success
"""
url = self._url("/revoke/{}".format(cert_id))
# Sectigo has a 512 character limit on the "reason" message, so catch that here.
if (not reason) or (len(reason) > 511):
raise Exception("Sectigo limit: reason must be > 0 character and < 512 characters")
data = {"reason": reason}
result = self._client.post(url, data=data)
return result.json()
| en | 0.783062 | # -*- coding: utf-8 -*- Define the cert_manager._certificate.Certificates base class. Act as a superclass for all certificate-related classes. This is due to the fact that several of the API endpoints have almost identical functions, so this allows code to be shared. # for X509, Base64 encoded # for X509 Certificate only, Base64 encoded # for X509 Intermediates/root only, Base64 encoded # for PKCS#7 Base64 encoded, # for PKCS#7 Bin encoded # for X509 Intermediates/root only Reverse, Base64 encoded Initialize the class. :param object client: An instantiated cert_manager.Client object :param string endpoint: The URL of the API endpoint (ex. "/ssl") :param string api_version: The API version to use; the default is "v1" # Set to None initially. Will be filled in by methods later. Retrieve all certificate types that are currently available. :return list: A list of dictionaries of certificate types # Only go to the API if we haven't done the API call yet, or if someone # specifically wants to refresh the internal cache # Build a dictionary instead of a flat list of dictionaries Retrieve all custom fields defined for SSL certificates. :return list: A list of dictionaries of custom fields # Only go to the API if we haven't done the API call yet, or if someone # specifically wants to refresh the internal cache Check the structure and contents of a list of dicts representing custom fields Raise exceptions if validation fails :raises Exception: if any of the validation steps fail # Make sure all custom fields are valid if present # for each mandatory field, there should be exactly one dict in the custom_fields list # whose name matches that mandatory field name Retrieve an existing certificate from the API. This method will raise a Pending exception if the certificate is still in a pending state. :param int cert_id: The certificate ID :param str cert_format: The format in which to retreive the certificate. Allowed values: *self.valid_formats* :return str: the string representing the certificate in the requested format # The certificate is ready for collection Enroll a certificate request with Sectigo to generate a certificate. :param string cert_type_name: The full cert type name Note: the name must match names returned from the get_types() method :param string csr: The Certificate Signing Request (CSR) :param int term: The length, in days, for the certificate to be issued :param int org_id: The ID of the organization in which to enroll the certificate :param list subject_alt_names: A list of Subject Alternative Names :param list external_requester: One or more e-mail addresses :param list custom_fields: zero or more objects representing custom fields and their values Note: each object must have a 'name' key and a 'value' key :return dict: The certificate_id and the normal status messages for errors # Retrieve all the arguments # Make sure a valid certificate type name was provided # Make sure a valid term is specified # You have to do the list/map/str thing because join can only operate on # a list of strings, and this will be a list of numbers Renew the certificate specified by the certificate ID. :param int cert_id: The certificate ID :return dict: The renewal result. "Successful" on success Replace a pre-existing certificate. :param int cert_id: The certificate ID :param string csr: The Certificate Signing Request (CSR) :param string common_name: Certificate common name. :param str reason: Reason for replacement (up to 512 characters), can be blank: "", but must exist. 
:param string subject_alt_names: Subject Alternative Names separated by a ",". :return: The result of the operation, "Successful" on success :rtype: dict # Retrieve all the arguments Revoke the certificate specified by the certificate ID. :param int cert_id: The certificate ID :param str reason: The Reason for revocation. Reason can be up to 512 characters and cannot be blank (i.e. empty string) :return dict: The revocation result. "Successful" on success # Sectigo has a 512 character limit on the "reason" message, so catch that here. | 2.953892 | 3 |
cmdb/migrations/0016_delete_infs.py | Andrreww/consistency-system | 1 | 6630406 | # Generated by Django 2.0.4 on 2018-04-24 14:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cmdb', '0015_auto_20180424_1404'),
]
operations = [
migrations.DeleteModel(
name='Infs',
),
]
| # Generated by Django 2.0.4 on 2018-04-24 14:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cmdb', '0015_auto_20180424_1404'),
]
operations = [
migrations.DeleteModel(
name='Infs',
),
]
| en | 0.854057 | # Generated by Django 2.0.4 on 2018-04-24 14:07 | 1.438024 | 1 |
modules/chempy/mol2.py | dualword/pymol-open-source | 636 | 6630407 | #A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright Schrodinger LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
from chempy import Storage
# see layer2/AtomInfo.h
cAtomFlag_polymer = 0x08000000
cAtomFlag_solvent = 0x10000000
class MOL2(Storage):
_bondTypes = { 1 : "1",
2 : "2",
3 : "3",
4 : "ar",
0 : "nc",
}
def toList(self,model,**kwargs):
buf = ["# created with PyMOL\n"]
# RTI MOLECULE
buf.append("@<TRIPOS>MOLECULE\n")
buf.append(model.molecule.title + "\n")
# defer until number of substructures known
buf_i_counts = len(buf)
buf.append(None)
buf.append("SMALL\n")
buf.append("USER_CHARGES\n")
no_text_type_count = 0
at_prev = None
residues = []
# RTI ATOM
# atom_id atom_name x y z atom_type [subst_id
# [subst_name [charge [status_bit]]]]
buf.append("@<TRIPOS>ATOM\n")
for atom_id, at in enumerate(model.atom, 1):
resn = at.resn or "UNK"
subst_name = resn + at.resi
if not (at_prev and at_prev.in_same_residue(at)):
residues.append([subst_name, atom_id,
at.chain or at.segi or '****',
resn, at.flags])
at_prev = at
text_type = at.text_type
if not text_type:
no_text_type_count += 1
text_type = at.symbol or "Any"
buf.append("%d\t%4s\t%.3f\t%.3f\t%.3f\t%2s\t%d\t%s\t%.3f\t%s\n" %
(atom_id,
at.name or at.symbol or "X",
at.coord[0],at.coord[1],at.coord[2],
text_type,
len(residues), subst_name,
at.partial_charge,
'WATER' if (at.flags & cAtomFlag_solvent) else '',
))
if no_text_type_count > 0:
print(" Warning: %d atoms missing 'text_type', using element symbol instead."
" Hint: use cmd.assign_atom_types() to assing MOL2 atom types.")
# RTI BOND
# bond_id origin_atom_id target_atom_id bond_type [status_bits]
buf.append("@<TRIPOS>BOND\n")
for b, bo in enumerate(model.bond):
bOrder = MOL2._bondTypes[bo.order]
buf.append("%d %d %d %s\n" % (b,
1 + bo.index[0],
1 + bo.index[1], bOrder))
# RTI SUBSTRUCTURE
# subst_id subst_name root_atom [subst_type [dict_type
# [chain [sub_type [inter_bonds [status [comment]]]]]]]
buf.append("@<TRIPOS>SUBSTRUCTURE\n")
for subst_id, res in enumerate(residues, 1):
buf.append('%d\t%s\t%d\t%s\t1 %s\t%s\n' % (subst_id,
res[0], res[1],
'RESIDUE' if (res[4] & cAtomFlag_polymer) else 'GROUP',
res[2], res[3]))
buf[buf_i_counts] = "%d %d %d\n" % (model.nAtom, model.nBond, len(residues))
return buf
| #A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright Schrodinger LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
from chempy import Storage
# see layer2/AtomInfo.h
cAtomFlag_polymer = 0x08000000
cAtomFlag_solvent = 0x10000000
class MOL2(Storage):
_bondTypes = { 1 : "1",
2 : "2",
3 : "3",
4 : "ar",
0 : "nc",
}
def toList(self,model,**kwargs):
buf = ["# created with PyMOL\n"]
# RTI MOLECULE
buf.append("@<TRIPOS>MOLECULE\n")
buf.append(model.molecule.title + "\n")
# defer until number of substructures known
buf_i_counts = len(buf)
buf.append(None)
buf.append("SMALL\n")
buf.append("USER_CHARGES\n")
no_text_type_count = 0
at_prev = None
residues = []
# RTI ATOM
# atom_id atom_name x y z atom_type [subst_id
# [subst_name [charge [status_bit]]]]
buf.append("@<TRIPOS>ATOM\n")
for atom_id, at in enumerate(model.atom, 1):
resn = at.resn or "UNK"
subst_name = resn + at.resi
if not (at_prev and at_prev.in_same_residue(at)):
residues.append([subst_name, atom_id,
at.chain or at.segi or '****',
resn, at.flags])
at_prev = at
text_type = at.text_type
if not text_type:
no_text_type_count += 1
text_type = at.symbol or "Any"
buf.append("%d\t%4s\t%.3f\t%.3f\t%.3f\t%2s\t%d\t%s\t%.3f\t%s\n" %
(atom_id,
at.name or at.symbol or "X",
at.coord[0],at.coord[1],at.coord[2],
text_type,
len(residues), subst_name,
at.partial_charge,
'WATER' if (at.flags & cAtomFlag_solvent) else '',
))
if no_text_type_count > 0:
print(" Warning: %d atoms missing 'text_type', using element symbol instead."
" Hint: use cmd.assign_atom_types() to assing MOL2 atom types.")
# RTI BOND
# bond_id origin_atom_id target_atom_id bond_type [status_bits]
buf.append("@<TRIPOS>BOND\n")
for b, bo in enumerate(model.bond):
bOrder = MOL2._bondTypes[bo.order]
buf.append("%d %d %d %s\n" % (b,
1 + bo.index[0],
1 + bo.index[1], bOrder))
# RTI SUBSTRUCTURE
# subst_id subst_name root_atom [subst_type [dict_type
# [chain [sub_type [inter_bonds [status [comment]]]]]]]
buf.append("@<TRIPOS>SUBSTRUCTURE\n")
for subst_id, res in enumerate(residues, 1):
buf.append('%d\t%s\t%d\t%s\t1 %s\t%s\n' % (subst_id,
res[0], res[1],
'RESIDUE' if (res[4] & cAtomFlag_polymer) else 'GROUP',
res[2], res[3]))
buf[buf_i_counts] = "%d %d %d\n" % (model.nAtom, model.nBond, len(residues))
return buf
| en | 0.443335 | #A* ------------------------------------------------------------------- #B* This file contains source code for the PyMOL computer program #C* copyright Schrodinger LLC. #D* ------------------------------------------------------------------- #E* It is unlawful to modify or remove this copyright notice. #F* ------------------------------------------------------------------- #G* Please see the accompanying LICENSE file for further information. #H* ------------------------------------------------------------------- #I* Additional authors of this source file include: #-* #-* #-* #Z* ------------------------------------------------------------------- # see layer2/AtomInfo.h # RTI MOLECULE # defer until number of substructures known # RTI ATOM # atom_id atom_name x y z atom_type [subst_id # [subst_name [charge [status_bit]]]] # RTI BOND # bond_id origin_atom_id target_atom_id bond_type [status_bits] # RTI SUBSTRUCTURE # subst_id subst_name root_atom [subst_type [dict_type # [chain [sub_type [inter_bonds [status [comment]]]]]]] | 1.819163 | 2 |
ddpg/critic_network.py | santhisenan/SDN_DDoS_Simulation | 50 | 6630408 | <reponame>santhisenan/SDN_DDoS_Simulation<filename>ddpg/critic_network.py
import tensorflow as tf
class CriticNetwork(tf.keras.Model):
def __init__(self, state_dim, action_dim, h1_critic, h2_critic, h3_critic,
trainable):
super(CriticNetwork, self).__init__(name='critic_network')
self.state_dim = state_dim
self.action_dim = action_dim
self.h1_critic = h1_critic
self.h2_critic = h2_critic
self.h3_critic = h3_critic
# The layers of the model
self.hidden_1 = tf.layers.Dense(units=h1_critic, activation=tf.nn.relu,
trainable=trainable,
name='hidden_1')
self.hidden_2 = tf.layers.Dense(units=h2_critic, activation=tf.nn.relu,
trainable=trainable,
name='hidden_2')
self.hidden_3 = tf.layers.Dense(units=h3_critic, activation=tf.nn.relu,
trainable=trainable,
name='hidden_3')
self.output_layer = tf.layers.Dense(units=1,
trainable=trainable,
name='output_layer') # Default
# activation function
def call(self, input_state, input_action):
inputs = tf.concat([input_state, input_action], axis=1)
x = self.hidden_1(inputs)
x = self.hidden_2(x)
x = self.hidden_3(x)
return self.output_layer(x)
| import tensorflow as tf
class CriticNetwork(tf.keras.Model):
def __init__(self, state_dim, action_dim, h1_critic, h2_critic, h3_critic,
trainable):
super(CriticNetwork, self).__init__(name='critic_network')
self.state_dim = state_dim
self.action_dim = action_dim
self.h1_critic = h1_critic
self.h2_critic = h2_critic
self.h3_critic = h3_critic
# The layers of the model
self.hidden_1 = tf.layers.Dense(units=h1_critic, activation=tf.nn.relu,
trainable=trainable,
name='hidden_1')
self.hidden_2 = tf.layers.Dense(units=h2_critic, activation=tf.nn.relu,
trainable=trainable,
name='hidden_2')
self.hidden_3 = tf.layers.Dense(units=h3_critic, activation=tf.nn.relu,
trainable=trainable,
name='hidden_3')
self.output_layer = tf.layers.Dense(units=1,
trainable=trainable,
name='output_layer') # Default
# activation function
def call(self, input_state, input_action):
inputs = tf.concat([input_state, input_action], axis=1)
x = self.hidden_1(inputs)
x = self.hidden_2(x)
x = self.hidden_3(x)
return self.output_layer(x) | en | 0.614669 | # The layers of the model # Default # activation function | 2.94762 | 3 |
extract_features.py | abhimanyusethia12/InvoiceNet | 0 | 6630409 | import sys
import re
import pickle
import pandas as pd
from nltk import ngrams
import argparse
import datefinder
from tqdm import tqdm
"""
FEATURES:
raw_text: The raw text
processed_text: The raw text of the last word in the N-gram
text_pattern: The raw text, after replacing uppercase characters with X,
lowercase with x, numbers with 0, repeating whitespace with
single whitespace and the rest with ?
bottom_margin: Vertical coordinate of the bottom margin of the
N-gram normalized to the page height
top_margin: Vertical coordinate of the top margin of the
N-gram normalized to the page height
right_margin: Horizontal coordinate of the right margin of the
N-gram normalized to the page width
left_margin: Horizontal coordinate of the left margin of the
N-gram normalized to the page width
has_digits: Whether there are any digits 0-9 in the N-gram
length: Number of characters in the N-gram
position_on_line: Count of words to the left of this N-gram normalized
to the count of total words on this line
line_size: The number of words on this line
page_height: The height of the page of this N-gram
page_width: The width of the page of this N-gram
parses_as_amount: Whether the N-gram parses as a fractional amount
parses_as_date: Whether the N-gram parses as a date
parses_as_number: Whether the N-gram parses as an integer
"""
def ngrammer(tokens, length=4):
"""
Generates n-grams from the given tokens
:param tokens: list of tokens in the text
:param length: n-grams of up to this length
:return: n-grams as tuples
"""
for n in range(1, min(len(tokens) + 1, length+1)):
for gram in ngrams(tokens, n):
yield gram
def extract_features(path):
"""
Loads a pickled dataframe from the given path, creates n-grams and extracts features
:param path: path to pickled dataframe
:return: dataframe containing n-grams and corresponding features
"""
with open(path, 'rb') as pklfile:
df = pickle.load(pklfile)
files = {}
print("\nExtracting features...\n")
# Filters the data into individual files and finds out the minimum and maximum
# x and y coordinates to estimate the width and height of each file.
# Also estimates the x coordinate for each token in each line for every file.
for i, row in df.iterrows():
if row['files'] not in files:
files[row['files']] = {'lines': {'words': [], 'labels': [], 'ymin': [], 'ymax': []},
'xmin': sys.maxsize, 'ymin': sys.maxsize, 'xmax': 0, 'ymax': 0}
tokens = row['words'].strip().split(' ')
char_length = (row['coords'][2] - row['coords'][0]) / len(row['words'].strip())
token_coords = [{'xmin': row['coords'][0],
'xmax': row['coords'][0] + (char_length * len(tokens[0]))}]
for idx in range(1, len(tokens)):
token_coords.append({'xmin': token_coords[-1]['xmax'] + char_length,
'xmax': token_coords[-1]['xmax'] + (char_length * (len(tokens[idx])+1))})
files[row['files']]['lines']['words'].append({'tokens': tokens, 'coords': token_coords})
files[row['files']]['lines']['labels'].append(row['labels'])
files[row['files']]['lines']['ymin'].append(row['coords'][1])
files[row['files']]['lines']['ymax'].append(row['coords'][3])
files[row['files']]['xmin'] = min(files[row['files']]['xmin'], row['coords'][0])
files[row['files']]['ymin'] = min(files[row['files']]['ymin'], row['coords'][1])
files[row['files']]['xmax'] = max(files[row['files']]['xmax'], row['coords'][2])
files[row['files']]['ymax'] = max(files[row['files']]['ymax'], row['coords'][3])
del df
grams = {'raw_text': [],
'processed_text': [],
'text_pattern': [],
'length': [],
'line_size': [],
'position_on_line': [],
'has_digits': [],
'bottom_margin': [],
'top_margin': [],
'left_margin': [],
'right_margin': [],
'page_width': [],
'page_height': [],
'parses_as_amount': [],
'parses_as_date': [],
'parses_as_number': [],
'label': [],
'closest_ngrams': []
}
label_dict = {0: 0, 1: 1, 2: 2, 18: 3}
# Calculates N-grams of lengths ranging from 1-4 for each line in each
# file and calculates 17 features for each N-gram.
with tqdm(total=len(files)) as pbar:
for key, value in files.items():
num_ngrams = len(grams['raw_text'])
page_height = value['ymax'] - value['ymin']
page_width = value['xmax'] - value['xmin']
for i in range(len(value['lines']['words'])):
tokens = value['lines']['words'][i]['tokens']
token_coords = value['lines']['words'][i]['coords']
for ngram in ngrammer(tokens):
grams['parses_as_date'].append(0.0)
grams['parses_as_amount'].append(0.0)
grams['parses_as_number'].append(0.0)
processed_text = []
for word in ngram:
if bool(list(datefinder.find_dates(word))):
processed_text.append('date')
grams['parses_as_date'][-1] = 1.0
elif bool(re.search(r'\d\.\d', word)) or '$' in word:
processed_text.append('amount')
grams['parses_as_amount'][-1] = 1.0
elif word.isnumeric():
processed_text.append('number')
grams['parses_as_number'][-1] = 1.0
else:
processed_text.append(word.lower())
raw_text = ' '.join(ngram)
grams['raw_text'].append(raw_text)
grams['processed_text'].append(' '.join(processed_text))
grams['text_pattern'].append(re.sub('[a-z]', 'x', re.sub('[A-Z]', 'X', re.sub('\d', '0', re.sub(
'[^a-zA-Z\d\ ]', '?', raw_text)))))
grams['length'].append(len(' '.join(ngram)))
grams['line_size'].append(len(tokens))
grams['position_on_line'].append(tokens.index(ngram[0])/len(tokens))
grams['has_digits'].append(1.0 if bool(re.search(r'\d', raw_text)) else 0.0)
grams['left_margin'].append((token_coords[tokens.index(ngram[0])]['xmin'] - value['xmin']) / page_width)
grams['top_margin'].append((value['lines']['ymin'][i] - value['ymin']) / page_height)
grams['right_margin'].append((token_coords[tokens.index(ngram[-1])]['xmax'] - value['xmin']) / page_width)
grams['bottom_margin'].append((value['lines']['ymax'][i] - value['ymin']) / page_height)
grams['page_width'].append(page_width)
grams['page_height'].append(page_height)
grams['label'].append(label_dict[value['lines']['labels'][i]])
# Finds the closest N-grams on all 4 sides for each N-gram
for i in range(num_ngrams, len(grams['raw_text'])):
grams['closest_ngrams'].append([-1] * 4)
distance = [sys.maxsize] * 6
for j in range(num_ngrams, len(grams['raw_text'])):
d = [grams['top_margin'][i] - grams['bottom_margin'][j],
grams['top_margin'][j] - grams['bottom_margin'][i],
grams['left_margin'][i] - grams['right_margin'][j],
grams['left_margin'][j] - grams['right_margin'][i],
abs(grams['left_margin'][i] - grams['left_margin'][j])]
if i == j:
continue
# If in the same line, check for closest ngram to left and right
if d[0] == d[1]:
if distance[2] > d[2] > 0:
distance[2] = d[2]
grams['closest_ngrams'][i][2] = j
if distance[3] > d[3] > 0:
distance[3] = d[3]
grams['closest_ngrams'][i][3] = j
# If this ngram is above current ngram
elif distance[0] > d[0] >= 0 and distance[4] > d[4]:
distance[0] = d[0]
distance[4] = d[4]
grams['closest_ngrams'][i][0] = j
# If this ngram is below current ngram
elif distance[1] > d[1] >= 0 and distance[5] > d[4]:
distance[1] = d[1]
distance[5] = d[4]
grams['closest_ngrams'][i][1] = j
pbar.update(1)
return pd.DataFrame(data=grams)
def main():
ap = argparse.ArgumentParser()
ap.add_argument("--data", default="data/dftrain.pk", help="path to training data")
ap.add_argument("--save_as", default="data/features.pk", help="save extracted features with this name")
args = ap.parse_args()
features = extract_features(args.data)
features.to_pickle(args.save_as, protocol=3)
print("\nSaved features as {}".format(args.save_as))
if __name__ == '__main__':
main()
| import sys
import re
import pickle
import pandas as pd
from nltk import ngrams
import argparse
import datefinder
from tqdm import tqdm
"""
FEATURES:
raw_text: The raw text
processed_text: The raw text of the last word in the N-gram
text_pattern: The raw text, after replacing uppercase characters with X,
lowercase with x, numbers with 0, repeating whitespace with
single whitespace and the rest with ?
bottom_margin: Vertical coordinate of the bottom margin of the
N-gram normalized to the page height
top_margin: Vertical coordinate of the top margin of the
N-gram normalized to the page height
right_margin: Horizontal coordinate of the right margin of the
N-gram normalized to the page width
left_margin: Horizontal coordinate of the left margin of the
N-gram normalized to the page width
has_digits: Whether there are any digits 0-9 in the N-gram
length: Number of characters in the N-gram
position_on_line: Count of words to the left of this N-gram normalized
to the count of total words on this line
line_size: The number of words on this line
page_height: The height of the page of this N-gram
page_width: The width of the page of this N-gram
parses_as_amount: Whether the N-gram parses as a fractional amount
parses_as_date: Whether the N-gram parses as a date
parses_as_number: Whether the N-gram parses as an integer
"""
def ngrammer(tokens, length=4):
"""
Generates n-grams from the given tokens
:param tokens: list of tokens in the text
:param length: n-grams of up to this length
:return: n-grams as tuples
"""
for n in range(1, min(len(tokens) + 1, length+1)):
for gram in ngrams(tokens, n):
yield gram
def extract_features(path):
"""
Loads a pickled dataframe from the given path, creates n-grams and extracts features
:param path: path to pickled dataframe
:return: dataframe containing n-grams and corresponding features
"""
with open(path, 'rb') as pklfile:
df = pickle.load(pklfile)
files = {}
print("\nExtracting features...\n")
# Filters the data into individual files and finds out the minimum and maximum
# x and y coordinates to estimate the width and height of each file.
# Also estimates the x coordinate for each token in each line for every file.
for i, row in df.iterrows():
if row['files'] not in files:
files[row['files']] = {'lines': {'words': [], 'labels': [], 'ymin': [], 'ymax': []},
'xmin': sys.maxsize, 'ymin': sys.maxsize, 'xmax': 0, 'ymax': 0}
tokens = row['words'].strip().split(' ')
char_length = (row['coords'][2] - row['coords'][0]) / len(row['words'].strip())
token_coords = [{'xmin': row['coords'][0],
'xmax': row['coords'][0] + (char_length * len(tokens[0]))}]
for idx in range(1, len(tokens)):
token_coords.append({'xmin': token_coords[-1]['xmax'] + char_length,
'xmax': token_coords[-1]['xmax'] + (char_length * (len(tokens[idx])+1))})
files[row['files']]['lines']['words'].append({'tokens': tokens, 'coords': token_coords})
files[row['files']]['lines']['labels'].append(row['labels'])
files[row['files']]['lines']['ymin'].append(row['coords'][1])
files[row['files']]['lines']['ymax'].append(row['coords'][3])
files[row['files']]['xmin'] = min(files[row['files']]['xmin'], row['coords'][0])
files[row['files']]['ymin'] = min(files[row['files']]['ymin'], row['coords'][1])
files[row['files']]['xmax'] = max(files[row['files']]['xmax'], row['coords'][2])
files[row['files']]['ymax'] = max(files[row['files']]['ymax'], row['coords'][3])
del df
grams = {'raw_text': [],
'processed_text': [],
'text_pattern': [],
'length': [],
'line_size': [],
'position_on_line': [],
'has_digits': [],
'bottom_margin': [],
'top_margin': [],
'left_margin': [],
'right_margin': [],
'page_width': [],
'page_height': [],
'parses_as_amount': [],
'parses_as_date': [],
'parses_as_number': [],
'label': [],
'closest_ngrams': []
}
label_dict = {0: 0, 1: 1, 2: 2, 18: 3}
# Calculates N-grams of lengths ranging from 1-4 for each line in each
# file and calculates 17 features for each N-gram.
with tqdm(total=len(files)) as pbar:
for key, value in files.items():
num_ngrams = len(grams['raw_text'])
page_height = value['ymax'] - value['ymin']
page_width = value['xmax'] - value['xmin']
for i in range(len(value['lines']['words'])):
tokens = value['lines']['words'][i]['tokens']
token_coords = value['lines']['words'][i]['coords']
for ngram in ngrammer(tokens):
grams['parses_as_date'].append(0.0)
grams['parses_as_amount'].append(0.0)
grams['parses_as_number'].append(0.0)
processed_text = []
for word in ngram:
if bool(list(datefinder.find_dates(word))):
processed_text.append('date')
grams['parses_as_date'][-1] = 1.0
elif bool(re.search(r'\d\.\d', word)) or '$' in word:
processed_text.append('amount')
grams['parses_as_amount'][-1] = 1.0
elif word.isnumeric():
processed_text.append('number')
grams['parses_as_number'][-1] = 1.0
else:
processed_text.append(word.lower())
raw_text = ' '.join(ngram)
grams['raw_text'].append(raw_text)
grams['processed_text'].append(' '.join(processed_text))
grams['text_pattern'].append(re.sub('[a-z]', 'x', re.sub('[A-Z]', 'X', re.sub('\d', '0', re.sub(
'[^a-zA-Z\d\ ]', '?', raw_text)))))
grams['length'].append(len(' '.join(ngram)))
grams['line_size'].append(len(tokens))
grams['position_on_line'].append(tokens.index(ngram[0])/len(tokens))
grams['has_digits'].append(1.0 if bool(re.search(r'\d', raw_text)) else 0.0)
grams['left_margin'].append((token_coords[tokens.index(ngram[0])]['xmin'] - value['xmin']) / page_width)
grams['top_margin'].append((value['lines']['ymin'][i] - value['ymin']) / page_height)
grams['right_margin'].append((token_coords[tokens.index(ngram[-1])]['xmax'] - value['xmin']) / page_width)
grams['bottom_margin'].append((value['lines']['ymax'][i] - value['ymin']) / page_height)
grams['page_width'].append(page_width)
grams['page_height'].append(page_height)
grams['label'].append(label_dict[value['lines']['labels'][i]])
# Finds the closest N-grams on all 4 sides for each N-gram
for i in range(num_ngrams, len(grams['raw_text'])):
grams['closest_ngrams'].append([-1] * 4)
distance = [sys.maxsize] * 6
for j in range(num_ngrams, len(grams['raw_text'])):
d = [grams['top_margin'][i] - grams['bottom_margin'][j],
grams['top_margin'][j] - grams['bottom_margin'][i],
grams['left_margin'][i] - grams['right_margin'][j],
grams['left_margin'][j] - grams['right_margin'][i],
abs(grams['left_margin'][i] - grams['left_margin'][j])]
if i == j:
continue
# If in the same line, check for closest ngram to left and right
if d[0] == d[1]:
if distance[2] > d[2] > 0:
distance[2] = d[2]
grams['closest_ngrams'][i][2] = j
if distance[3] > d[3] > 0:
distance[3] = d[3]
grams['closest_ngrams'][i][3] = j
# If this ngram is above current ngram
elif distance[0] > d[0] >= 0 and distance[4] > d[4]:
distance[0] = d[0]
distance[4] = d[4]
grams['closest_ngrams'][i][0] = j
# If this ngram is below current ngram
elif distance[1] > d[1] >= 0 and distance[5] > d[4]:
distance[1] = d[1]
distance[5] = d[4]
grams['closest_ngrams'][i][1] = j
pbar.update(1)
return pd.DataFrame(data=grams)
def main():
ap = argparse.ArgumentParser()
ap.add_argument("--data", default="data/dftrain.pk", help="path to training data")
ap.add_argument("--save_as", default="data/features.pk", help="save extracted features with this name")
args = ap.parse_args()
features = extract_features(args.data)
features.to_pickle(args.save_as, protocol=3)
print("\nSaved features as {}".format(args.save_as))
if __name__ == '__main__':
main()
| en | 0.7644 | FEATURES: raw_text: The raw text processed_text: The raw text of the last word in the N-gram text_pattern: The raw text, after replacing uppercase characters with X, lowercase with x, numbers with 0, repeating whitespace with single whitespace and the rest with ? bottom_margin: Vertical coordinate of the bottom margin of the N-gram normalized to the page height top_margin: Vertical coordinate of the top margin of the N-gram normalized to the page height right_margin: Horizontal coordinate of the right margin of the N-gram normalized to the page width left_margin: Horizontal coordinate of the left margin of the N-gram normalized to the page width has_digits: Whether there are any digits 0-9 in the N-gram length: Number of characters in the N-gram position_on_line: Count of words to the left of this N-gram normalized to the count of total words on this line line_size: The number of words on this line page_height: The height of the page of this N-gram page_width: The width of the page of this N-gram parses_as_amount: Whether the N-gram parses as a fractional amount parses_as_date: Whether the N-gram parses as a date parses_as_number: Whether the N-gram parses as an integer Generates n-grams from the given tokens :param tokens: list of tokens in the text :param length: n-grams of up to this length :return: n-grams as tuples Loads a pickled dataframe from the given path, creates n-grams and extracts features :param path: path to pickled dataframe :return: dataframe containing n-grams and corresponding features # Filters the data into individual files and finds out the minimum and maximum # x and y coordinates to estimate the width and height of each file. # Also estimates the x coordinate for each token in each line for every file. # Calculates N-grams of lengths ranging from 1-4 for each line in each # file and calculates 17 features for each N-gram. # Finds the closest N-grams on all 4 sides for each N-gram # If in the same line, check for closest ngram to left and right # If this ngram is above current ngram # If this ngram is below current ngram | 3.193548 | 3 |
curt/cli.py | mittagessen/curt | 0 | 6630410 | <reponame>mittagessen/curt<filename>curt/cli.py
#! /usr/bin/env python
import glob
import time
import torch
import click
import os.path
import random
import logging
import pathlib
import datetime
import numpy as np
import torchvision.transforms as tf
from PIL import Image, ImageDraw
from pathlib import Path
from rich.logging import RichHandler
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, StochasticWeightAveraging
from curt.models import CurtCurveModel, MaskedCurtCurveModel
from curt.dataset import CurveDataModule
from curt.progress import KrakenTrainProgressBar
from curt.util.misc import NestedTensor
from curt.transforms import BezierCoeff
def set_logger(logger=None, level=logging.ERROR):
logger.addHandler(RichHandler(rich_tracebacks=True))
logger.setLevel(level)
# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2
logging.captureWarnings(True)
logger = logging.getLogger()
torch.multiprocessing.set_sharing_strategy('file_system')
def _expand_gt(ctx, param, value):
images = []
for expression in value:
images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])
return images
def _validate_manifests(ctx, param, value):
images = []
for manifest in value:
for entry in manifest.readlines():
im_p = entry.rstrip('\r\n')
if os.path.isfile(im_p):
images.append(im_p)
else:
logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name))
return images
def _validate_merging(ctx, param, value):
"""
Maps baseline/region merging to a dict of merge structures.
"""
if not value:
return None
merge_dict = {}
try:
for m in value:
k, v = m.split(':')
merge_dict[v] = k # type: ignore
except Exception:
raise click.BadParameter('Mappings must be in format target:src')
return merge_dict
@click.group()
@click.pass_context
@click.option('-v', '--verbose', default=0, count=True)
@click.option('-s', '--seed', default=None, type=click.INT,
help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
'ensure reproducible random splits of data')
def cli(ctx, verbose, seed):
if seed:
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
ctx.meta['verbose'] = verbose
set_logger(logger, level=30 - min(10 * verbose, 20))
@cli.command('polytrain')
@click.pass_context
@click.option('--precision', default='32', type=click.Choice(['64', '32', '16', 'bf16']), help='set tensor precision')
@click.option('-lr', '--learning-rate', default=0.0006, help='Learning rate')
@click.option('-blr', '--backbone-learning-rate', default=0.00006, help='Learning rate')
@click.option('-B', '--batch-size', default=1, help='Batch size')
@click.option('-w', '--weight-decay', default=0.01, help='Weight decay in optimizer')
@click.option('-N', '--epochs', default=25, help='Number of epochs to train for')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-lr-drop', '--lr-drop', default=15, help='Reduction factor of learning rate over time')
@click.option('--dropout', default=0.1, help='Dropout applied in the transformer')
@click.option('--match-cost-class', default=1.0, help='Class coefficient in the matching cost')
@click.option('--match-cost-curve', default=5.0, help='L1 curve coefficient in the matching cost')
@click.option('--curve-loss-coef', default=5.0, help='L1 curve coefficient in the loss')
@click.option('--eos-coef', default=0.1, help='Relative classification weight of the no-object class')
@click.option('--mask-loss-coef', default=1.0, help='Mask loss coefficient')
@click.option('--dice-loss-coef', default=1.0, help='Mask dice loss coefficient')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='curt_model', help='Pytorch lightning output directory')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mb',
'--merge-baselines',
show_default=True,
default=None,
help='Baseline type merge mapping. Same syntax as `--merge-regions`',
multiple=True,
callback=_validate_merging)
@click.option('--merge-all-baselines/--no-merge-baselines',
show_default=True,
default=False,
help='Merge all baseline types into `default`')
@click.option('--workers', show_default=True, default=2, help='Number of data loader workers.')
@click.option('-d', '--device', show_default=True, default='1', help='Select device to use (1, ...)')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def polytrain(ctx, precision, learning_rate, backbone_learning_rate,
batch_size, weight_decay, epochs, freq, lr_drop, dropout,
match_cost_class, match_cost_curve, curve_loss_coef, eos_coef,
mask_loss_coef, dice_loss_coef, load, output, partition,
training_files, evaluation_files, valid_baselines, merge_baselines,
merge_all_baselines, workers, device, ground_truth):
if evaluation_files:
partition = 1
ground_truth = list(ground_truth)
if training_files:
ground_truth.extend(training_files)
if len(ground_truth) == 0:
raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
if freq > 1:
val_check_interval = {'check_val_every_n_epoch': int(freq)}
else:
val_check_interval = {'val_check_interval': freq}
if not valid_baselines:
valid_baselines = None
    # The pretrained curve model has to be loaded first: the data module is
    # sized from its number of query slots.
    if load:
        curt_model = CurtCurveModel.load_from_checkpoint(load).model
    else:
        raise click.UsageError('No pretrained weights given for mask head training.')
    data_module = CurveDataModule(train_files=ground_truth,
                                  val_files=evaluation_files,
                                  partition=partition,
                                  valid_baselines=valid_baselines,
                                  merge_baselines=merge_baselines,
                                  merge_all_baselines=merge_all_baselines,
                                  max_lines=curt_model.num_queries,
                                  batch_size=batch_size,
                                  num_workers=workers,
                                  masks=True)
    model = MaskedCurtCurveModel(curt_model,
                                 learning_rate=learning_rate,
                                 weight_decay=weight_decay,
                                 lr_drop=lr_drop,
                                 match_cost_class=match_cost_class,
                                 match_cost_curve=match_cost_curve,
                                 curve_loss_coef=curve_loss_coef,
                                 mask_loss_coef=mask_loss_coef,
                                 dice_loss_coef=dice_loss_coef,
                                 eos_coef=eos_coef,
                                 batches_per_epoch=len(data_module.train_dataloader()))
click.echo("Line types: There's only one.")
# for k, v in data_module.curve_train.dataset.class_mapping.items():
# click.echo(f'{k}\t{v}')
checkpoint_cb = ModelCheckpoint(monitor='loss', save_top_k=5, mode='min')
if precision != 'bf16':
precision = int(precision)
trainer = Trainer(default_root_dir=output,
precision=precision,
max_epochs=epochs,
auto_select_gpus=True,
accelerator='gpu',
devices=device,
strategy='ddp',
callbacks=[KrakenTrainProgressBar(), checkpoint_cb, StochasticWeightAveraging(swa_epoch_start=0.8, annealing_epochs=int(0.2*epochs))],
                      # batches_per_epoch is an argument of MaskedCurtCurveModel
                      # above, not of the Lightning Trainer.
                      **val_check_interval)
trainer.fit(model, data_module)
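# A rough invocation sketch for the mask-head fine-tuning command above (the
# entry-point name, checkpoint and manifest paths are placeholders, not part
# of this module):
#
#   python -m curt_cli polytrain -i curt_base.ckpt \
#       -t train_manifest.lst -e val_manifest.lst --merge-all-baselines -d 2
#
# A pretrained CurtCurveModel checkpoint is mandatory (-i); training data can
# come from manifests (-t), positional ground truth files, or both; -d is
# forwarded to the Lightning Trainer's `devices` argument.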
@cli.command('train')
@click.pass_context
@click.option('--precision', default='32', type=click.Choice(['64', '32', '16', 'bf16']), help='set tensor precision')
@click.option('-lr', '--learning-rate', default=1e-4, help='Learning rate')
@click.option('-blr', '--backbone-learning-rate', default=1e-4, help='Backbone learning rate')
@click.option('-B', '--batch-size', default=1, help='Batch size')
@click.option('-w', '--weight-decay', default=1e-4, help='Weight decay in optimizer')
@click.option('-N', '--epochs', default=300, help='Number of epochs to train for')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-lr-drop', '--lr-drop', default=200, help='Reduction factor of learning rate over time')
@click.option('--encoder', default='mit_b0', type=click.Choice(['mit_b0', 'mit_b1', 'mit_b2', 'mit_b3', 'mit_b4', 'mit_b5']), help='Encoder mix transformer (MiT) architecture')
@click.option('-dl', '--decoder-layers', default=3, help='Number of decoder layers in the transformer')
@click.option('-dff', '--dim-ff', default=2048, help='Intermediate size of the feedforward layers in the transformer block')
@click.option('-edd', '--embedding-dim', default=256, help='Size of the embeddings (dimension of the transformer)')
@click.option('--dropout', default=0.1, help='Dropout applied in the transformer')
@click.option('-nh', '--num-heads', default=8, help="Number of attention heads inside the transformer's attentions")
@click.option('-nq', '--num-queries', default=500, help='Number of query slots (#lines + #regions detectable in an image)')
@click.option('--match-cost-class', default=1.0, help='Class coefficient in the matching cost')
@click.option('--match-cost-curve', default=5.0, help='L1 curve coefficient in the matching cost')
@click.option('--curve-loss-coef', default=5.0, help='L1 curve coefficient in the loss')
@click.option('--eos-coef', default=0.1, help='Relative classification weight of the no-object class')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='curt_model', help='Pytorch lightning output directory')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mb',
'--merge-baselines',
show_default=True,
default=None,
help='Baseline type merge mapping. Same syntax as `--merge-regions`',
multiple=True,
callback=_validate_merging)
@click.option('--merge-all-baselines/--no-merge-baselines',
show_default=True,
default=False,
help='Merge all baseline types into `default`')
@click.option('--set-matcher/--dummy-matcher', show_default=True, default=True, help='Use the set criterion or dummy matching.')
@click.option('--aux-loss/--no-aux-loss', show_default=True, default=True, help='Enable auxiliary losses in decoder.')
@click.option('--workers', show_default=True, default=2, help='Number of data loader workers.')
@click.option('-d', '--device', show_default=True, default='1', help='Select device to use')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def train(ctx, precision, learning_rate, backbone_learning_rate, batch_size,
weight_decay, epochs, freq, lr_drop, encoder, decoder_layers, dim_ff,
embedding_dim, dropout, num_heads, num_queries, match_cost_class,
match_cost_curve, curve_loss_coef, eos_coef, load, output, partition,
training_files, evaluation_files, valid_baselines, merge_baselines,
merge_all_baselines, set_matcher, aux_loss, workers, device,
ground_truth):
if evaluation_files:
partition = 1
ground_truth = list(ground_truth)
if training_files:
ground_truth.extend(training_files)
if len(ground_truth) == 0:
raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
if freq > 1:
val_check_interval = {'check_val_every_n_epoch': int(freq)}
else:
val_check_interval = {'val_check_interval': freq}
if not valid_baselines:
valid_baselines = None
data_module = CurveDataModule(train_files=ground_truth,
val_files=evaluation_files,
partition=partition,
valid_baselines=valid_baselines,
merge_baselines=merge_baselines,
merge_all_baselines=merge_all_baselines,
max_lines=num_queries,
batch_size=batch_size,
num_workers=workers)
click.echo("Line types: There's only one.")
# for k, v in data_module.curve_train.dataset.class_mapping.items():
# click.echo(f'{k}\t{v}')
if load:
model = CurtCurveModel.load_from_checkpoint(load)
else:
model = CurtCurveModel(data_module.num_classes,
num_queries=num_queries,
learning_rate=learning_rate,
backbone_learning_rate=backbone_learning_rate,
weight_decay=weight_decay,
lr_drop=lr_drop,
match_cost_class=match_cost_class,
match_cost_curve=match_cost_curve,
curve_loss_coef=curve_loss_coef,
eos_coef=eos_coef,
embedding_dim=embedding_dim,
dropout=dropout,
num_heads=num_heads,
dim_ff=dim_ff,
encoder=encoder,
decoder_layers=decoder_layers,
set_matcher=set_matcher,
aux_loss=aux_loss)
checkpoint_cb = ModelCheckpoint(monitor='loss', save_top_k=5, mode='min')
if precision != 'bf16':
precision = int(precision)
trainer = Trainer(default_root_dir=output,
precision=precision,
max_epochs=epochs,
auto_select_gpus=True,
accelerator='gpu',
devices=device,
strategy='ddp',
callbacks=[KrakenTrainProgressBar(), checkpoint_cb, StochasticWeightAveraging(swa_epoch_start=0.8, annealing_epochs=int(0.2*epochs))],
**val_check_interval)
trainer.fit(model, data_module)
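# For comparison, a from-scratch run of the `train` command above might look
# like this (paths and the entry-point name are again placeholders):
#
#   python -m curt_cli train --encoder mit_b2 -N 300 -B 2 \
#       -t train_manifest.lst -e val_manifest.lst -o runs/curt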
@cli.command('pred')
@click.pass_context
@click.option('-i', '--load', help='Input model')
@click.option('-o', '--suffix', default='.overlay.png', show_default=True, help='Suffix for output files')
@click.option('-t', '--threshold', default=0.9, show_default=True, help='Minimum score for objectness')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.argument('input_files', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def pred(ctx, load, suffix, threshold, device, input_files):
curt_model = CurtCurveModel.load_from_checkpoint(load).model
curt_model = curt_model.to(device)
transforms = tf.Compose([tf.Resize(800),
tf.ToTensor(),
tf.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
for file in input_files:
click.echo(f'Processing {file}')
file = pathlib.Path(file)
with open(file, 'rb') as fp:
im = Image.open(file)
with open(file.with_suffix(suffix), 'wb') as fo:
with torch.no_grad():
i = transforms(im).to(device).unsqueeze(0)
mask = torch.zeros((1,) + i.shape[2:], device=device)
i = NestedTensor(i, mask)
o = curt_model(i)
draw = ImageDraw.Draw(im)
samples = np.linspace(0, 1, 20)
curves, logits = o['pred_curves'], o['pred_logits']
scores, labels = logits.softmax(-1).max(-1)
keep = labels.eq(1) & (scores > threshold)
curves = curves[keep]
curves = curves.to('cpu')
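                    # Each kept prediction is eight normalised values, presumably the
                    # four (x, y) control points of a cubic Bezier curve: multiplying
                    # by im.size * 4 (the (w, h) tuple repeated four times) rescales
                    # them to pixels, resize(4, 2) turns them into control points, and
                    # BezierCoeff(samples).dot(...) evaluates the curve at the 20
                    # sample positions for drawing.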
for line in curves:
line = (np.array(line) * (im.size * 4))
line.resize(4, 2)
draw.line([tuple(x) for x in np.array(BezierCoeff(samples)).dot(line)], fill=(0, 130, 200, 255), width=2, joint='curve')
del draw
im.save(fo, format='png')
if __name__ == '__main__':
cli()
python/tests/test_aWhere.py | aWhereAPI/V1-API-Code-Samples | 1 | 6630411 | <gh_stars>1-10
from unittest import TestCase
import tempfile
from datetime import datetime
import mock
from aWhere import Weather
class WeatherTest(TestCase):
def setUp(self):
self.client = Weather(authorize=False)
def test_reading_credential_file(self):
with tempfile.NamedTemporaryFile() as temp:
temp.write(b'[Weather]\nconsumer_key = ABCDEFG\nconsumer_secret = 123456\n')
temp.flush()
keys = self.client._read_credential(temp.name)
self.assertEqual(keys['consumer_key'], 'ABCDEFG')
self.assertEqual(keys['consumer_secret'], '123456')
def test_key_and_secret_are_hashed(self):
keys = {'consumer_key': 'ABCDEFG',
'consumer_secret': '123456'}
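        # The expected value below is just base64('ABCDEFG:123456'), i.e. the
        # credentials are joined as 'key:secret' (Basic-auth style) before
        # being encoded.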
keyhash = self.client._hash_credential(keys)
self.assertEqual(keyhash, 'QUJDREVGRzoxMjM0NTY=')
@mock.patch('aWhere.requests.post')
def test_get_token(self, mock_post):
mock_response = mock.Mock()
expected_response = {'access_token': 'xxxx', 'expires_in': 3599}
mock_response.json.return_value = expected_response
mock_post.return_value = mock_response
keys = {'consumer_key': 'ABCDEFG',
'consumer_secret': '123456'}
response = self.client._get_token(keys)
#mock_post.assert_called_once_with('https://api.awhere.com/oauth/token')
#mock_response.json.assert_called_once()
self.assertEqual(response, expected_response)
@mock.patch('aWhere.Weather._get_token')
def test_authorize(self, mock_get_token):
mock_token = {'access_token': 'xxxx', 'expires_in': 3599}
mock_get_token.return_value = mock_token
self.client.authorize()
expected_authorization = {'Authorization': 'Bearer {}'.format(mock_token['access_token'])}
self.assertEqual(self.client.authorization, expected_authorization)
def test_check_options_with_valid_arguments(self):
validArgs = {'attribute': [],
'endDate': None,
'plantDate': None,
'temperatureUnits': None,
'gddMethod': None,
'baseTemp': None,
'maxTempCap': None,
'minTempCap': None}
try:
self.client._check_options(**validArgs)
except ValueError:
self.fail("Valid argument failed check.")
def test_check_options_with_invalid_arguments(self):
invalidArgs = {'invalid': 'argument'}
self.assertRaises(ValueError, self.client._check_options, **invalidArgs)
def test_check_options_with_invalid_attributes(self):
invalidAttributes = {'attribute': ['invalid']}
self.assertRaises(ValueError, self.client._check_options, **invalidAttributes)
def test_reformat_data(self):
rawdata = [{'dailyAttributes': {'precip': 0.0},'latitude': 0,
'date': '1999-12-31T00:00:00', 'longitude': 0, 'requestId': 0}]
date = datetime.strptime('1999-12-31T00:00:00', '%Y-%m-%dT%H:%M:%S')
expected_data = {date: {'precip': 0.0}}
returned_data = self.client._reformat_data(rawdata)
self.assertEqual(expected_data, returned_data)
def test_reformat_corrupted_data(self):
rawdata = [{'invalid': 'data'}]
self.assertRaises(KeyError, self.client._reformat_data, rawdata)
gallery/serializers.py | glomium/elmnt.de | 0 | 6630412 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from rest_framework import serializers
from .models import Photo
class PhotoSerializer(serializers.HyperlinkedModelSerializer):
image = serializers.SerializerMethodField()
class Meta:
model = Photo
fields = ('url', 'date', 'slug', 'image')
extra_kwargs = {
'url': {'lookup_field': 'slug'},
}
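        # With lookup_field='slug', the hyperlinked 'url' field resolves the
        # detail route by slug rather than the default pk.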
def get_image(self, obj):
if obj and obj.image:
return obj.image.url
return ''
detect.py | GuoQuanhao/Tiny-Face-Paddle | 0 | 6630413 | import numpy as np
from paddle.vision import transforms
from models.model import DetectionModel
import paddle
import trainer
import json
import time
import cv2
num_templates = 25
nms_thresh = 0.2
prob_thresh = 0.9
templates = json.load(open('./datasets/templates.json'))
templates = np.round_(np.array(templates), decimals=8)
dets = np.empty((0, 5)) # store bbox (x1, y1, x2, y2), score
rf = {'size': [859, 859],
'stride': [8, 8],
'offset': [-1, -1]}
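# 'rf' appears to describe the receptive field of the detector's output grid
# (window size, stride and offset in input pixels); trainer.get_detections
# presumably combines it with the 25 face templates to map heatmap cells back
# to box coordinates in the original image.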
val_transforms = transforms.Compose([transforms.Transpose(),
transforms.Normalize(mean=[0.0, 0.0, 0.0],
std=[255, 255, 255]),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
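# Note the two-step normalisation above: the first Normalize merely divides the
# uint8 pixel values by 255 (mean 0, std 255), and the second applies the usual
# ImageNet channel statistics, mirroring torchvision-style preprocessing.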
def main():
img = cv2.cvtColor(cv2.imread('./assets/test.jpg'), cv2.COLOR_BGR2RGB).astype('float32')
img_raw = cv2.imread('./assets/test.jpg')
input = paddle.to_tensor(val_transforms(img)).unsqueeze(0)
model = DetectionModel(num_objects=1, num_templates=num_templates)
model.set_state_dict(paddle.load('./weights/checkpoint_80.pdparams')["model"])
start = time.time()
dets = trainer.get_detections(model, input, templates, rf,
val_transforms, prob_thresh,
nms_thresh)
end = time.time()
for idx, bbox in enumerate(dets):
bbox = np.round(bbox)
cv2.rectangle(img_raw, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 2)
print("Inference Speed:", end-start)
print("Saved result.jpg")
cv2.imwrite('result.jpg', img_raw)
if __name__ == '__main__':
main()
test/args/models/no_defaults.py | pvandyken/cluster_utils | 0 | 6630414 | from __future__ import absolute_import
import re
import test.args.models.formatters as formatters
from test.args.models.common import ModelTest, update_model
from typing import Any, List
import attr
from typing_extensions import TypedDict
from kslurm.args.arg_types import FlagArg, KeywordArg, ShapeArg, TailArg
from kslurm.exceptions import MandatoryArgError
def get_tests(model: object) -> List[List[List[Any]]]:
return [
[
# Providing everything works
[
"07:23",
"gpu",
"3",
"--length_5_keyword",
"one",
"of",
"the",
"five",
"values",
"command",
],
# Raise Exception if missing stuff
["gpu"],
],
[
update_model(
model,
[
"07:23",
True,
3,
[True, ["one", "of", "the", "five", "values"]],
[None, ["command"]],
],
),
[
MandatoryArgError("time has not been provided a value."),
MandatoryArgError("number has not been provided a value."),
MandatoryArgError("--length_5_keyword has not been provided a value."),
],
],
]
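# get_tests pairs two argv-style inputs (one complete, one missing mandatory
# values) with, respectively, the parsed model expected from the first and the
# MandatoryArgError list expected from the second; both are handed to the
# ModelTest instances at the bottom of the module.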
@attr.s(auto_attribs=True)
class AttrModel:
time: ShapeArg[str] = ShapeArg(
id="random",
match=lambda arg: bool(re.match(r"^([0-9]{1,2}-)?[0-9]{1,2}:[0-9]{2}$", arg)),
format=formatters.time,
)
gpu: FlagArg = FlagArg(match=["gpu"])
number: ShapeArg[int] = ShapeArg(
match=lambda arg: bool(re.match(r"^[0-9]+$", arg)), format=int
)
length_5_keyword: KeywordArg[str] = KeywordArg(
id="length_5_keyword", match=["--length_5_keyword"], num=5, value=None
)
tail: TailArg = TailArg()
class TypedDictModel(TypedDict):
time: ShapeArg[str]
gpu: FlagArg
number: ShapeArg[str]
length_5_keyword: KeywordArg[str]
tail: TailArg
attrmodel = AttrModel()
typed_dict_model = TypedDictModel(
time=ShapeArg(
id="random",
match=lambda arg: bool(re.match(r"^([0-9]{1,2}-)?[0-9]{1,2}:[0-9]{2}$", arg)),
format=formatters.time,
),
gpu=FlagArg(match=["gpu"]),
number=ShapeArg(match=lambda arg: bool(re.match(r"^[0-9]+$", arg))),
length_5_keyword=KeywordArg[str](
id="length_5_keyword", match=["--length_5_keyword"], num=5, value=None
),
tail=TailArg(),
)
no_default_attr = ModelTest(attrmodel, *get_tests(attrmodel))
no_default_dict = ModelTest(typed_dict_model, *get_tests(typed_dict_model))
cli/tests/integrations/test_marathon.py | ArmandGrillet/dcos-cli | 0 | 6630415 | import contextlib
import json
import os
import re
import sys
import threading
import pytest
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from dcos import constants
from .common import (app, assert_command, assert_lines,
exec_command, list_deployments, popen_tty,
show_app, update_config, watch_all_deployments,
watch_deployment)
_ZERO_INSTANCE_APP_ID = 'zero-instance-app'
_ZERO_INSTANCE_APP_INSTANCES = 100
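# These integration tests drive the real `dcos marathon` subcommands through
# exec_command/assert_command from .common, using the zero-instance sleep app
# under tests/data/marathon/apps as a cheap fixture that can be scaled up and
# torn down per test.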
def test_help():
with open('dcoscli/data/help/marathon.txt') as content:
assert_command(['dcos', 'marathon', '--help'],
stdout=content.read().encode('utf-8'))
def test_version():
assert_command(['dcos', 'marathon', '--version'],
stdout=b'dcos-marathon version SNAPSHOT\n')
def test_info():
assert_command(['dcos', 'marathon', '--info'],
stdout=b'Deploy and manage applications to DC/OS\n')
def test_about():
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'about'])
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['name'] == "marathon"
@pytest.fixture
def env():
r = os.environ.copy()
r.update({
constants.PATH_ENV: os.environ[constants.PATH_ENV],
constants.DCOS_CONFIG_ENV: os.path.join("tests", "data", "dcos.toml"),
})
return r
def test_missing_config(env):
with update_config("core.dcos_url", None, env):
assert_command(
['dcos', 'marathon', 'app', 'list'],
returncode=1,
stderr=(b'Missing required config parameter: "core.dcos_url". '
b'Please run `dcos config set core.dcos_url <value>`.\n'),
env=env)
def test_empty_list():
_list_apps()
def test_add_app():
with _zero_instance_app():
_list_apps('zero-instance-app')
def test_add_app_through_http():
with _zero_instance_app_through_http():
_list_apps('zero-instance-app')
def test_add_app_bad_resource():
stderr = (b'Can\'t read from resource: bad_resource.\n'
b'Please check that it exists.\n')
assert_command(['dcos', 'marathon', 'app', 'add', 'bad_resource'],
returncode=1,
stderr=stderr)
def test_add_app_with_filename():
with _zero_instance_app():
_list_apps('zero-instance-app')
def test_remove_app():
with _zero_instance_app():
pass
_list_apps()
def test_add_bad_json_app():
with open('tests/data/marathon/apps/bad.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add'],
stdin=fd)
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith('Error loading JSON: ')
def test_add_existing_app():
with _zero_instance_app():
app_path = 'tests/data/marathon/apps/zero_instance_sleep_v2.json'
with open(app_path) as fd:
stderr = b"Application '/zero-instance-app' already exists\n"
assert_command(['dcos', 'marathon', 'app', 'add'],
returncode=1,
stderr=stderr,
stdin=fd)
def test_show_app():
with _zero_instance_app():
show_app('zero-instance-app')
def test_show_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
result = show_app('zero-instance-app')
show_app('zero-instance-app', result['version'])
def test_show_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
show_app('zero-instance-app', "-1")
def test_show_missing_relative_app_version():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
# Marathon persists app versions indefinitely by ID, so pick a large
# index here in case the history is long
cmd = ['dcos', 'marathon', 'app', 'show', '--app-version=-200', app_id]
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 1
assert stdout == b''
pattern = ("Application 'zero-instance-app' only has [1-9][0-9]* "
"version\\(s\\)\\.\n")
assert re.fullmatch(pattern, stderr.decode('utf-8'), flags=re.DOTALL)
def test_show_missing_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2000-02-11T20:39:32.972Z', 'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(
"Error: App '/zero-instance-app' does not exist")
def test_show_bad_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show', '--app-version=20:39:32.972Z',
'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.startswith(b'Error while fetching')
pattern = (b"""{"message":"Invalid format: """
b"""\\"20:39:32.972Z\\" is malformed"""
b""" at \\":39:32.972Z\\""}\n""")
assert stderr.endswith(pattern)
def test_show_bad_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
assert_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2', 'zero-instance-app'],
returncode=1,
stderr=b"Relative versions must be negative: 2\n")
def test_start_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'start', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_start_app():
with _zero_instance_app():
_start_app('zero-instance-app')
def test_start_already_started_app():
with _zero_instance_app():
_start_app('zero-instance-app')
stdout = (b"Application 'zero-instance-app' already "
b"started: 1 instances.\n")
assert_command(
['dcos', 'marathon', 'app', 'start', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_stop_missing_app():
assert_command(['dcos', 'marathon', 'app', 'stop', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_stop_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_stop_already_stopped_app():
with _zero_instance_app():
stdout = (b"Application 'zero-instance-app' already "
b"stopped: 0 instances.\n")
assert_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_update_missing_app():
assert_command(['dcos', 'marathon', 'app', 'update', 'missing-id'],
stderr=b"Error: App '/missing-id' does not exist\n",
returncode=1)
def test_update_bad_type():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update',
'zero-instance-app', 'cpus="a string"'])
stderr_end = b"""{
"details": [
{
"errors": [
"error.expected.jsnumber"
],
"path": "/cpus"
}
],
"message": "Invalid JSON"
}
"""
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_invalid_request():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', '{', 'instances'])
assert returncode == 1
assert stdout == b''
stderr = stderr.decode()
# TODO (tamar): this becomes 'Error: App '/{' does not exist\n"'
# in Marathon 0.11.0
assert stderr.startswith('Error on request')
assert stderr.endswith('HTTP 400: Bad Request\n')
def test_app_add_invalid_request():
path = os.path.join(
'tests', 'data', 'marathon', 'apps', 'app_add_400.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add', path])
assert returncode == 1
assert stdout == b''
assert re.match(b"Error on request \[POST .*\]: HTTP 400: Bad Request:",
stderr)
stderr_end = b"""{
"details": [
{
"errors": [
"host is not a valid network type"
],
"path": "/container/docker/network"
}
],
"message": "Invalid JSON"
}
"""
assert stderr.endswith(stderr_end)
def test_update_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
'cpus=1', 'mem=20', "cmd='sleep 100'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_from_stdin():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
def test_restarting_stopped_app():
with _zero_instance_app():
stdout = (b"Unable to perform rolling restart of application '"
b"/zero-instance-app' because it has no running tasks\n")
assert_command(
['dcos', 'marathon', 'app', 'restart', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_restarting_missing_app():
assert_command(['dcos', 'marathon', 'app', 'restart', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_restarting_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'restart', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_killing_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
task_set_1 = set([task['id']
for task in _list_tasks(3, 'zero-instance-app')])
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Killed tasks: ')
assert stderr == b''
watch_all_deployments()
task_set_2 = set([task['id']
for task in _list_tasks(app_id='zero-instance-app')])
assert len(task_set_1.intersection(task_set_2)) == 0
def test_killing_scaling_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
command = ['dcos', 'marathon', 'app', 'kill', '--scale',
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert returncode == 0
assert stdout.decode().startswith('Started deployment: ')
assert stdout.decode().find('version') > -1
assert stdout.decode().find('deploymentId') > -1
assert stderr == b''
watch_all_deployments()
_list_tasks(0)
def test_killing_with_host_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
existing_tasks = _list_tasks(3, 'zero-instance-app')
task_hosts = set([task['host'] for task in existing_tasks])
if len(task_hosts) <= 1:
pytest.skip('test needs 2 or more agents to succeed, '
'only {} agents available'.format(len(task_hosts)))
assert len(task_hosts) > 1
kill_host = list(task_hosts)[0]
expected_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] == kill_host])
not_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] != kill_host])
assert len(not_to_be_killed) > 0
assert len(expected_to_be_killed) > 0
command = ['dcos', 'marathon', 'app', 'kill', '--host', kill_host,
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert stdout.decode().startswith('Killed tasks: ')
assert stderr == b''
new_tasks = set([task['id'] for task in _list_tasks()])
assert not_to_be_killed.intersection(new_tasks) == not_to_be_killed
assert len(expected_to_be_killed.intersection(new_tasks)) == 0
@pytest.mark.skipif(
True, reason='https://github.com/mesosphere/marathon/issues/3251')
def test_kill_stopped_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 1
assert stdout.decode().startswith('Killed tasks: []')
def test_kill_missing_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'app'])
assert returncode == 1
assert stdout.decode() == ''
stderr_expected = "Error: App '/app' does not exist"
assert stderr.decode().strip() == stderr_expected
def test_list_version_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'version', 'list', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_list_version_negative_max_count():
assert_command(['dcos', 'marathon', 'app', 'version', 'list',
'missing-id', '--max-count=-1'],
returncode=1,
stderr=b'Maximum count must be a positive number: -1\n')
def test_list_version_app():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_list_versions(app_id, 1)
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 2)
def test_list_version_max_count():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 1, 1)
_list_versions(app_id, 2, 2)
_list_versions(app_id, 2, 3)
def test_list_empty_deployment():
list_deployments(0)
def test_list_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1)
def test_list_deployment_table():
"""Simple sanity check for listing deployments with a table output.
The more specific testing is done in unit tests.
"""
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
assert_lines(['dcos', 'marathon', 'deployment', 'list'], 2)
def test_list_deployment_missing_app():
with _zero_instance_app():
_start_app('zero-instance-app')
list_deployments(0, 'missing-id')
def test_list_deployment_app():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1, 'zero-instance-app')
def test_rollback_missing_deployment():
assert_command(
['dcos', 'marathon', 'deployment', 'rollback', 'missing-deployment'],
returncode=1,
stderr=b'Error: DeploymentPlan missing-deployment does not exist\n')
def test_rollback_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'deployment', 'rollback', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert 'deploymentId' in result
assert 'version' in result
assert stderr == b''
watch_all_deployments()
list_deployments(0)
def test_stop_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0)
def test_watching_missing_deployment():
watch_deployment('missing-deployment', 1)
def test_watching_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
watch_deployment(result[0]['id'], 60)
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0, 'zero-instance-app')
def test_list_empty_task():
_list_tasks(0)
def test_list_empty_task_not_running_app():
with _zero_instance_app():
_list_tasks(0)
def test_list_tasks():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
def test_list_tasks_table():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
assert_lines(['dcos', 'marathon', 'task', 'list'], 4)
def test_list_app_tasks():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3, 'zero-instance-app')
def test_list_missing_app_tasks():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(0, 'missing-id')
def test_show_missing_task():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', 'missing-id'])
stderr = stderr.decode('utf-8')
assert returncode == 1
assert stdout == b''
assert stderr.startswith("Task '")
assert stderr.endswith("' does not exist\n")
def test_show_task():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
result = _list_tasks(3, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert result['appId'] == '/zero-instance-app'
assert stderr == b''
def test_stop_task():
with _zero_instance_app():
_start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id)
def test_stop_task_wipe():
with _zero_instance_app():
_start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id, '--wipe')
def test_stop_unknown_task():
with _zero_instance_app():
_start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, expect_success=False)
def test_stop_unknown_task_wipe():
with _zero_instance_app():
_start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, '--wipe', expect_success=False)
def test_bad_configuration(env):
with update_config('marathon.url', 'http://localhost:88888', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'about'], env=env)
assert returncode == 1
assert stdout == b''
assert stderr.startswith(
b"URL [http://localhost:88888/v2/info] is unreachable")
def test_app_locked_error():
with app('tests/data/marathon/apps/sleep_many_instances.json',
'/sleep-many-instances',
wait=False):
stderr = b'Changes blocked: deployment already in progress for app.\n'
assert_command(
['dcos', 'marathon', 'app', 'stop', 'sleep-many-instances'],
returncode=1,
stderr=stderr)
@pytest.mark.skipif(sys.platform == 'win32',
reason="No pseudo terminal on windows")
def test_app_add_no_tty():
proc, master = popen_tty('dcos marathon app add')
stdout, stderr = proc.communicate()
os.close(master)
print(stdout)
print(stderr)
assert proc.wait() == 1
assert stdout == b''
assert stderr == (b"We currently don't support reading from the TTY. "
b"Please specify an application JSON.\n"
b"E.g.: dcos marathon app add < app_resource.json\n")
def _list_apps(app_id=None):
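    """List Marathon apps as JSON, assert that only `app_id` (or nothing) is deployed, and return the result."""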
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list', '--json'])
result = json.loads(stdout.decode('utf-8'))
if app_id is None:
assert len(result) == 0
else:
assert len(result) == 1
assert result[0]['id'] == '/' + app_id
assert returncode == 0
assert stderr == b''
return result
def _start_app(app_id, instances=None):
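    """Start an app (optionally with an instance count) and assert that a deployment was created."""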
cmd = ['dcos', 'marathon', 'app', 'start', app_id]
if instances is not None:
cmd.append(str(instances))
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def _update_app(app_id, file_path):
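    """Update an app from a JSON file fed through stdin and assert that a deployment was created."""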
with open(file_path) as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', app_id],
stdin=fd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def _list_versions(app_id, expected_min_count, max_count=None):
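    """List an app's versions, asserting at least `expected_min_count` results and, when given, at most `max_count`."""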
cmd = ['dcos', 'marathon', 'app', 'version', 'list', app_id]
if max_count is not None:
cmd.append('--max-count={}'.format(max_count))
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert isinstance(result, list)
assert stderr == b''
# Marathon persists app versions indefinitely by ID, so there may be extras
assert len(result) >= expected_min_count
if max_count is not None:
assert len(result) <= max_count
def _list_tasks(expected_count=None, app_id=None):
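    """List tasks as JSON (optionally for a single app), assert the expected count when given, and return the result."""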
cmd = ['dcos', 'marathon', 'task', 'list', '--json']
if app_id is not None:
cmd.append(app_id)
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
if expected_count:
assert len(result) == expected_count
assert stderr == b''
return result
def _stop_task(task_id, wipe=None, expect_success=True):
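    """Stop a task (optionally wiping it) and assert a successful or failing exit code as requested."""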
cmd = ['dcos', 'marathon', 'task', 'stop', task_id]
if wipe is not None:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['id'] == task_id
else:
assert returncode == 1
@contextlib.contextmanager
def _zero_instance_app():
with app('tests/data/marathon/apps/zero_instance_sleep.json',
'zero-instance-app'):
yield
@contextlib.contextmanager
def _zero_instance_app_through_http():
    class JSONRequestHandler(BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
            with open('tests/data/marathon/apps/zero_instance_sleep.json',
                      'rb') as app_json:
                self.wfile.write(app_json.read())
host = 'localhost'
port = 12345
server = HTTPServer((host, port), JSONRequestHandler)
thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
thread.start()
with app('http://{}:{}'.format(host, port), 'zero-instance-app'):
try:
yield
finally:
server.shutdown()
src/random_char_generator.py | Epicguru/PythonML | 0 | 6630416 |
def run(take_input=True, line_count=10, min_chars_per_line=10, max_chars_per_line=20):
print("Running the random character generator...")
print("")
import input_helper as ih
import random
# Take keyboard input, if required.
if take_input:
line_count = ih.get_int("Char line count:")
min_chars_per_line = ih.get_int("Min chars per line:", allow_negatives=False)
max_chars_per_line = ih.get_int("Max chars per line:", allow_negatives=False)
# Validate input.
if line_count == 0 or max_chars_per_line == 0:
print("Line count or max chars per line are 0, quitting...")
return
if min_chars_per_line > max_chars_per_line:
print("The min number of chars per line ("
+ str(min_chars_per_line) +
") cannot be larger than the max number of chars per line ("
+ str(max_chars_per_line) +
")! The minimum has been adjusted to be equal to the maximum.")
min_chars_per_line = max_chars_per_line
    # Populate a character pool with ASCII codes 50-99 ('2' through 'c').
    # Keeping an explicit pool makes it easy to limit or extend the characters to choose from.
chars = []
for i in range(50, 100):
chars.append(chr(i))
# Generate random sequences of characters based on the input.
lines = []
for i in range(line_count):
        # randint is inclusive on both ends and stays valid when min == max.
        cc = random.randint(min_chars_per_line, max_chars_per_line)
line = ""
for j in range(cc):
            line += random.choice(chars)
lines.append(line)
# Print the lines...
for line in lines:
print(line)
# And return them
return lines
if __name__ == "__main__":
ls = run(take_input=True)
Year-2/Computational-math/src/labs/lab_1/iteration.py | zubrailx/University-ITMO | 3 | 6630417 |
from copy import copy
from modules.matrix import Matrix
def solve_iterate(ma, mb, mx, diff):
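    """Refine the initial guess mx with Jacobi iterations until successive
    approximations differ by less than diff (Euclidean norm); returns the
    solution, the number of iterations and the last correction."""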
it_count = 0
matrix_x = Matrix().init(mx)
matrix_x_prev = matrix_x
while (calculate_difference(matrix_x_prev, matrix_x) >= diff or it_count == 0):
matrix_x_prev = matrix_x
matrix_x = iterate(ma, mb, matrix_x)
it_count += 1
return (matrix_x, it_count, matrix_x - matrix_x_prev)
def iterate(matrix_a, matrix_b, matrix_x):
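    """Perform one Jacobi step: every x_i is recomputed as
    (b_i - sum_{j != i} a_ij * x_j) / a_ii from the previous approximation."""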
new_matrix_x = Matrix(matrix_x.rows, matrix_x.columns)
for i in range(matrix_a.rows):
row = copy(matrix_a[i])
x_main = - row[i]
new_matrix_x[i][0] = matrix_b[i][0] / (-x_main)
row[i] = 0
for j in range(matrix_a.columns):
new_matrix_x[i][0] += row[j] / x_main * matrix_x[j][0]
return new_matrix_x
def calculate_difference(matrix_prev, matrix_next):
if (matrix_prev.rows == matrix_next.rows and matrix_prev.columns == matrix_next.columns):
return _calculate_diff_evkl(matrix_prev, matrix_next)
else:
raise Exception("ERROR: compared matrices have different dimensions")
def _calculate_diff_evkl(m1, m2):
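    """Return the Euclidean norm of the element-wise difference of two matrices."""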
result = 0.0
for i in range(m1.rows):
for j in range(m1.columns):
result += (m1[i][j] - m2[i][j]) ** 2
return result ** 0.5
tools/accuracy_checker/accuracy_checker/representation/quality_assessment.py | APrigarina/open_model_zoo | 1,031 | 6630418 | """
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .base_representation import BaseRepresentation
class QualityAssessment(BaseRepresentation):
pass
class QualityAssessmentAnnotation(QualityAssessment):
def __init__(self, identifier, quality_score=None):
super().__init__(identifier)
self.quality_score = quality_score
class QualityAssessmentPrediction(QualityAssessment):
def __init__(self, identifier, quality_assessment, quality_score=0):
super().__init__(identifier)
self.quality_assessment = quality_assessment
self.quality_score = quality_score
| """
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .base_representation import BaseRepresentation
class QualityAssessment(BaseRepresentation):
pass
class QualityAssessmentAnnotation(QualityAssessment):
def __init__(self, identifier, quality_score=None):
super().__init__(identifier)
self.quality_score = quality_score
class QualityAssessmentPrediction(QualityAssessment):
def __init__(self, identifier, quality_assessment, quality_score=0):
super().__init__(identifier)
self.quality_assessment = quality_assessment
self.quality_score = quality_score
| en | 0.850136 | Copyright (c) 2018-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 2.114239 | 2 |
xrypto/markets/_binance.py | AlphaPerfect/Xrypto | 10 | 6630419 | # Copyright (C) 2017, Philsong <<EMAIL>>
import logging
import requests
from .market import Market
from binance.client import Client
class Binance(Market):
def __init__(self, pair_code):
base_currency, market_currency = self.get_tradeable_pairs(pair_code)
super().__init__(base_currency, market_currency, pair_code, 0.001)
self.client = Client(None, None)
def update_depth(self):
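        """Fetch the current order book from Binance and store the formatted depth."""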
raw_depth = self.client.get_order_book(symbol=self.pair_code)
self.depth = self.format_depth(raw_depth)
def get_tradeable_pairs(self, pair_code):
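        """Map a Binance pair code to its (base_currency, market_currency) tuple."""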
if pair_code == 'ETHBTC':
base_currency = 'BTC'
market_currency = 'ETH'
elif pair_code == 'BNBBTC':
base_currency = 'BTC'
market_currency = 'BNB'
elif pair_code == 'BNBETH':
base_currency = 'ETH'
market_currency = 'BNB'
elif pair_code == 'MCOBTC':
base_currency = 'BTC'
market_currency = 'MCO'
elif pair_code == 'MCOETH':
base_currency = 'ETH'
market_currency = 'MCO'
elif pair_code == 'QTUMBTC':
base_currency = 'BTC'
            market_currency = 'QTUM'
elif pair_code == 'QTUMETH':
base_currency = 'ETH'
market_currency = 'QTUM'
elif pair_code == 'WTCBTC':
base_currency = 'BTC'
market_currency = 'WTC'
elif pair_code == 'WTCETH':
base_currency = 'ETH'
market_currency = 'WTC'
else:
            assert False, 'Unsupported pair code: {}'.format(pair_code)
        return base_currency, market_currency
homeassistant/components/vacuum/demo.py | sara0871/laughing--barnacle- | 1 | 6630420 | """
Demo platform for the vacuum component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import logging
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA, SUPPORT_BATTERY, SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED, SUPPORT_LOCATE, SUPPORT_PAUSE, SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND, SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_STATE, SUPPORT_START, STATE_CLEANING,
STATE_DOCKED, STATE_IDLE, STATE_PAUSED, STATE_RETURNING, VacuumDevice,
StateVacuumDevice)
_LOGGER = logging.getLogger(__name__)
SUPPORT_MINIMAL_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF
SUPPORT_BASIC_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_STATUS | SUPPORT_BATTERY
SUPPORT_MOST_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_STOP | \
SUPPORT_RETURN_HOME | SUPPORT_STATUS | SUPPORT_BATTERY
SUPPORT_ALL_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
SUPPORT_STOP | SUPPORT_RETURN_HOME | \
SUPPORT_FAN_SPEED | SUPPORT_SEND_COMMAND | \
SUPPORT_LOCATE | SUPPORT_STATUS | SUPPORT_BATTERY | \
SUPPORT_CLEAN_SPOT
SUPPORT_STATE_SERVICES = SUPPORT_STATE | SUPPORT_PAUSE | SUPPORT_STOP | \
SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | \
SUPPORT_BATTERY | SUPPORT_CLEAN_SPOT | SUPPORT_START
FAN_SPEEDS = ['min', 'medium', 'high', 'max']
DEMO_VACUUM_COMPLETE = '0_Ground_floor'
DEMO_VACUUM_MOST = '1_First_floor'
DEMO_VACUUM_BASIC = '2_Second_floor'
DEMO_VACUUM_MINIMAL = '3_Third_floor'
DEMO_VACUUM_NONE = '4_Fourth_floor'
DEMO_VACUUM_STATE = '5_Fifth_floor'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Demo vacuums."""
add_devices([
DemoVacuum(DEMO_VACUUM_COMPLETE, SUPPORT_ALL_SERVICES),
DemoVacuum(DEMO_VACUUM_MOST, SUPPORT_MOST_SERVICES),
DemoVacuum(DEMO_VACUUM_BASIC, SUPPORT_BASIC_SERVICES),
DemoVacuum(DEMO_VACUUM_MINIMAL, SUPPORT_MINIMAL_SERVICES),
DemoVacuum(DEMO_VACUUM_NONE, 0),
StateDemoVacuum(DEMO_VACUUM_STATE),
])
class DemoVacuum(VacuumDevice):
"""Representation of a demo vacuum."""
def __init__(self, name, supported_features):
"""Initialize the vacuum."""
self._name = name
self._supported_features = supported_features
self._state = False
self._status = 'Charging'
self._fan_speed = FAN_SPEEDS[1]
self._cleaned_area = 0
self._battery_level = 100
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo vacuum."""
return False
@property
def is_on(self):
"""Return true if vacuum is on."""
return self._state
@property
def status(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_STATUS == 0:
return
return self._status
@property
def fan_speed(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the status of the vacuum."""
assert self.supported_features & SUPPORT_FAN_SPEED != 0
return FAN_SPEEDS
@property
def battery_level(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def device_state_attributes(self):
"""Return device state attributes."""
return {ATTR_CLEANED_AREA: round(self._cleaned_area, 2)}
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def turn_on(self, **kwargs):
"""Turn the vacuum on."""
if self.supported_features & SUPPORT_TURN_ON == 0:
return
self._state = True
self._cleaned_area += 5.32
self._battery_level -= 2
self._status = 'Cleaning'
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the vacuum off."""
if self.supported_features & SUPPORT_TURN_OFF == 0:
return
self._state = False
self._status = 'Charging'
self.schedule_update_ha_state()
def stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return
self._state = False
self._status = 'Stopping the current task'
self.schedule_update_ha_state()
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return
self._state = True
self._cleaned_area += 1.32
self._battery_level -= 1
self._status = "Cleaning spot"
self.schedule_update_ha_state()
def locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return
self._status = "Hi, I'm over here!"
self.schedule_update_ha_state()
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return
self._state = not self._state
if self._state:
self._status = 'Resuming the current task'
self._cleaned_area += 1.32
self._battery_level -= 1
else:
self._status = 'Pausing the current task'
self.schedule_update_ha_state()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set the vacuum's fan speed."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
if fan_speed in self.fan_speed_list:
self._fan_speed = fan_speed
self.schedule_update_ha_state()
def return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return
self._state = False
self._status = 'Returning home...'
self._battery_level += 5
self.schedule_update_ha_state()
def send_command(self, command, params=None, **kwargs):
"""Send a command to the vacuum."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return
self._status = 'Executing {}({})'.format(command, params)
self._state = True
self.schedule_update_ha_state()
class StateDemoVacuum(StateVacuumDevice):
"""Representation of a demo vacuum supporting states."""
def __init__(self, name):
"""Initialize the vacuum."""
self._name = name
self._supported_features = SUPPORT_STATE_SERVICES
self._state = STATE_DOCKED
self._fan_speed = FAN_SPEEDS[1]
self._cleaned_area = 0
self._battery_level = 100
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo vacuum."""
return False
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
@property
def state(self):
"""Return the current state of the vacuum."""
return self._state
@property
def battery_level(self):
"""Return the current battery level of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def fan_speed(self):
"""Return the current fan speed of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the list of supported fan speeds."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return FAN_SPEEDS
@property
def device_state_attributes(self):
"""Return device state attributes."""
return {ATTR_CLEANED_AREA: round(self._cleaned_area, 2)}
def start(self):
"""Start or resume the cleaning task."""
if self.supported_features & SUPPORT_START == 0:
return
if self._state != STATE_CLEANING:
self._state = STATE_CLEANING
self._cleaned_area += 1.32
self._battery_level -= 1
self.schedule_update_ha_state()
def pause(self):
"""Pause the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return
if self._state == STATE_CLEANING:
self._state = STATE_PAUSED
self.schedule_update_ha_state()
def stop(self, **kwargs):
"""Stop the cleaning task, do not return to dock."""
if self.supported_features & SUPPORT_STOP == 0:
return
self._state = STATE_IDLE
self.schedule_update_ha_state()
def return_to_base(self, **kwargs):
"""Return dock to charging base."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return
self._state = STATE_RETURNING
self.schedule_update_ha_state()
self.hass.loop.call_later(30, self.__set_state_to_dock)
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return
self._state = STATE_CLEANING
self._cleaned_area += 1.32
self._battery_level -= 1
self.schedule_update_ha_state()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set the vacuum's fan speed."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
if fan_speed in self.fan_speed_list:
self._fan_speed = fan_speed
self.schedule_update_ha_state()
def __set_state_to_dock(self):
self._state = STATE_DOCKED
self.schedule_update_ha_state()
| """
Demo platform for the vacuum component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import logging
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA, SUPPORT_BATTERY, SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED, SUPPORT_LOCATE, SUPPORT_PAUSE, SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND, SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_STATE, SUPPORT_START, STATE_CLEANING,
STATE_DOCKED, STATE_IDLE, STATE_PAUSED, STATE_RETURNING, VacuumDevice,
StateVacuumDevice)
_LOGGER = logging.getLogger(__name__)
SUPPORT_MINIMAL_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF
SUPPORT_BASIC_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_STATUS | SUPPORT_BATTERY
SUPPORT_MOST_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_STOP | \
SUPPORT_RETURN_HOME | SUPPORT_STATUS | SUPPORT_BATTERY
SUPPORT_ALL_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
SUPPORT_STOP | SUPPORT_RETURN_HOME | \
SUPPORT_FAN_SPEED | SUPPORT_SEND_COMMAND | \
SUPPORT_LOCATE | SUPPORT_STATUS | SUPPORT_BATTERY | \
SUPPORT_CLEAN_SPOT
SUPPORT_STATE_SERVICES = SUPPORT_STATE | SUPPORT_PAUSE | SUPPORT_STOP | \
SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | \
SUPPORT_BATTERY | SUPPORT_CLEAN_SPOT | SUPPORT_START
FAN_SPEEDS = ['min', 'medium', 'high', 'max']
DEMO_VACUUM_COMPLETE = '0_Ground_floor'
DEMO_VACUUM_MOST = '1_First_floor'
DEMO_VACUUM_BASIC = '2_Second_floor'
DEMO_VACUUM_MINIMAL = '3_Third_floor'
DEMO_VACUUM_NONE = '4_Fourth_floor'
DEMO_VACUUM_STATE = '5_Fifth_floor'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Demo vacuums."""
add_devices([
DemoVacuum(DEMO_VACUUM_COMPLETE, SUPPORT_ALL_SERVICES),
DemoVacuum(DEMO_VACUUM_MOST, SUPPORT_MOST_SERVICES),
DemoVacuum(DEMO_VACUUM_BASIC, SUPPORT_BASIC_SERVICES),
DemoVacuum(DEMO_VACUUM_MINIMAL, SUPPORT_MINIMAL_SERVICES),
DemoVacuum(DEMO_VACUUM_NONE, 0),
StateDemoVacuum(DEMO_VACUUM_STATE),
])
class DemoVacuum(VacuumDevice):
"""Representation of a demo vacuum."""
def __init__(self, name, supported_features):
"""Initialize the vacuum."""
self._name = name
self._supported_features = supported_features
self._state = False
self._status = 'Charging'
self._fan_speed = FAN_SPEEDS[1]
self._cleaned_area = 0
self._battery_level = 100
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo vacuum."""
return False
@property
def is_on(self):
"""Return true if vacuum is on."""
return self._state
@property
def status(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_STATUS == 0:
return
return self._status
@property
def fan_speed(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the status of the vacuum."""
assert self.supported_features & SUPPORT_FAN_SPEED != 0
return FAN_SPEEDS
@property
def battery_level(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def device_state_attributes(self):
"""Return device state attributes."""
return {ATTR_CLEANED_AREA: round(self._cleaned_area, 2)}
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def turn_on(self, **kwargs):
"""Turn the vacuum on."""
if self.supported_features & SUPPORT_TURN_ON == 0:
return
self._state = True
self._cleaned_area += 5.32
self._battery_level -= 2
self._status = 'Cleaning'
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the vacuum off."""
if self.supported_features & SUPPORT_TURN_OFF == 0:
return
self._state = False
self._status = 'Charging'
self.schedule_update_ha_state()
def stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return
self._state = False
self._status = 'Stopping the current task'
self.schedule_update_ha_state()
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return
self._state = True
self._cleaned_area += 1.32
self._battery_level -= 1
self._status = "Cleaning spot"
self.schedule_update_ha_state()
def locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return
self._status = "Hi, I'm over here!"
self.schedule_update_ha_state()
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return
self._state = not self._state
if self._state:
self._status = 'Resuming the current task'
self._cleaned_area += 1.32
self._battery_level -= 1
else:
self._status = 'Pausing the current task'
self.schedule_update_ha_state()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set the vacuum's fan speed."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
if fan_speed in self.fan_speed_list:
self._fan_speed = fan_speed
self.schedule_update_ha_state()
def return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return
self._state = False
self._status = 'Returning home...'
self._battery_level += 5
self.schedule_update_ha_state()
def send_command(self, command, params=None, **kwargs):
"""Send a command to the vacuum."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return
self._status = 'Executing {}({})'.format(command, params)
self._state = True
self.schedule_update_ha_state()
class StateDemoVacuum(StateVacuumDevice):
"""Representation of a demo vacuum supporting states."""
def __init__(self, name):
"""Initialize the vacuum."""
self._name = name
self._supported_features = SUPPORT_STATE_SERVICES
self._state = STATE_DOCKED
self._fan_speed = FAN_SPEEDS[1]
self._cleaned_area = 0
self._battery_level = 100
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo vacuum."""
return False
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
@property
def state(self):
"""Return the current state of the vacuum."""
return self._state
@property
def battery_level(self):
"""Return the current battery level of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def fan_speed(self):
"""Return the current fan speed of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the list of supported fan speeds."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return FAN_SPEEDS
@property
def device_state_attributes(self):
"""Return device state attributes."""
return {ATTR_CLEANED_AREA: round(self._cleaned_area, 2)}
def start(self):
"""Start or resume the cleaning task."""
if self.supported_features & SUPPORT_START == 0:
return
if self._state != STATE_CLEANING:
self._state = STATE_CLEANING
self._cleaned_area += 1.32
self._battery_level -= 1
self.schedule_update_ha_state()
def pause(self):
"""Pause the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return
if self._state == STATE_CLEANING:
self._state = STATE_PAUSED
self.schedule_update_ha_state()
def stop(self, **kwargs):
"""Stop the cleaning task, do not return to dock."""
if self.supported_features & SUPPORT_STOP == 0:
return
self._state = STATE_IDLE
self.schedule_update_ha_state()
def return_to_base(self, **kwargs):
"""Return dock to charging base."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return
self._state = STATE_RETURNING
self.schedule_update_ha_state()
self.hass.loop.call_later(30, self.__set_state_to_dock)
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return
self._state = STATE_CLEANING
self._cleaned_area += 1.32
self._battery_level -= 1
self.schedule_update_ha_state()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set the vacuum's fan speed."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
if fan_speed in self.fan_speed_list:
self._fan_speed = fan_speed
self.schedule_update_ha_state()
def __set_state_to_dock(self):
self._state = STATE_DOCKED
self.schedule_update_ha_state()
| en | 0.830909 | Demo platform for the vacuum component. For more details about this platform, please refer to the documentation https://home-assistant.io/components/demo/ Set up the Demo vacuums. Representation of a demo vacuum. Initialize the vacuum. Return the name of the vacuum. No polling needed for a demo vacuum. Return true if vacuum is on. Return the status of the vacuum. Return the status of the vacuum. Return the status of the vacuum. Return the status of the vacuum. Return device state attributes. Flag supported features. Turn the vacuum on. Turn the vacuum off. Stop the vacuum. Perform a spot clean-up. Locate the vacuum (usually by playing a song). Start, pause or resume the cleaning task. Set the vacuum's fan speed. Tell the vacuum to return to its dock. Send a command to the vacuum. Representation of a demo vacuum supporting states. Initialize the vacuum. Return the name of the vacuum. No polling needed for a demo vacuum. Flag supported features. Return the current state of the vacuum. Return the current battery level of the vacuum. Return the current fan speed of the vacuum. Return the list of supported fan speeds. Return device state attributes. Start or resume the cleaning task. Pause the cleaning task. Stop the cleaning task, do not return to dock. Return dock to charging base. Perform a spot clean-up. Set the vacuum's fan speed. | 2.097162 | 2 |
views/base_api.py | Astlo/CaLiSoft | 1 | 6630421 |
from django.http.response import HttpResponse
from django.views.decorators.http import require_http_methods
from neomodel import UniqueProperty, DoesNotExist
import json
from objectmodels.Dataset import Dataset
from objectmodels.License import License
from neomodels import NeoFactory, ObjectFactory
from neomodels.NeoModels import LicenseModel, DatasetModel, license_filter_labels, dataset_filter_search, license_filter_sets, get_leaf_licenses, get_compliant_licenses, get_compatible_licenses
from utils.TimerDecorator import fn_timer
from utils.authentificator import need_auth
from utils import D3jsData
from utils import Constraints
@require_http_methods(['GET', 'POST'])
def license_path(request):
if request.method == 'GET':
return get_licenses(request)
elif request.method == 'POST':
return add_license(request)
@require_http_methods(['GET', 'POST'])
def dataset_path(request):
if request.method == 'GET':
return get_datasets(request)
elif request.method == 'POST':
return add_dataset(request)
def get_licenses(request):
response_content = []
for neo_license in LicenseModel.nodes:
license_object = ObjectFactory.objectLicense(neo_license)
response_content.append(license_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
def get_datasets(request):
response_content = []
for neo_dataset in DatasetModel.nodes:
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response_content.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@need_auth
def add_dataset(request):
json_dataset = json.loads(request.body)
object_dataset = Dataset()
object_dataset.from_json(json_dataset)
neo_dataset = NeoFactory.NeoDataset(object_dataset)
object_dataset = ObjectFactory.objectDataset(neo_dataset)
try:
neo_dataset.save()
response = HttpResponse(
json.dumps(object_dataset.to_json()),
content_type='application/json',
status=201,
)
except UniqueProperty:
response = HttpResponse(
json.dumps(object_dataset.to_json()),
content_type='application/json',
status=409,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def get_license_by_hash(request, hashed_sets):
try:
neo_license = LicenseModel.nodes.get(hashed_sets=hashed_sets)
license_object = ObjectFactory.objectLicense(neo_license)
response = HttpResponse(
json.dumps(license_object.to_json()),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"{}",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def get_dataset_by_hash(request, hashed_uri):
try:
neo_dataset = DatasetModel.nodes.get(hashed_uri=hashed_uri)
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response = HttpResponse(
json.dumps(dataset_object.to_json()),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"{}",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_license_search(request):
query = request.GET.get('query', None)
label = request.GET.get('label', None)
permissions = request.GET.get('permissions', None)
if is_empty(permissions):
permissions = None
obligations = request.GET.get('obligations', None)
if is_empty(obligations):
obligations = None
prohibitions = request.GET.get('prohibitions', None)
if is_empty(prohibitions):
prohibitions = None
neo_licenses = LicenseModel.nodes
if query:
neo_licenses = license_filter_labels(query)
else:
if label:
neo_licenses = license_filter_labels(label)
if permissions:
neo_licenses = license_filter_sets(permissions, 'permissions')
if obligations:
neo_licenses = license_filter_sets(obligations, 'obligations')
if prohibitions:
neo_licenses = license_filter_sets(prohibitions, 'prohibitions')
response_content = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
response_content.append(license_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_dataset_search(request):
query = request.GET.get('query', None)
label = request.GET.get('label', None)
descr = request.GET.get('descr', None)
uri = request.GET.get('uri', None)
neo_datasets = DatasetModel.nodes
if query:
neo_datasets = dataset_filter_search(query)
else:
if label:
neo_datasets = neo_datasets.filter(label__icontains=label)
if uri:
neo_datasets = neo_datasets.filter(uri__icontains=uri)
if descr:
neo_datasets = neo_datasets.filter(description__icontains=descr)
response_content = []
for neo_dataset in neo_datasets:
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response_content.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_datasets_of_licenses(request, hashed_sets):
try:
neo_license = LicenseModel.nodes.get(hashed_sets=hashed_sets)
license_datasets = []
for dataset in neo_license.datasets.all():
dataset_object = ObjectFactory.objectDataset(dataset)
license_datasets.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(license_datasets),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def is_empty(str_list):
if str_list is not None:
if str_list.replace(' ', '').replace('[', '').replace(']', '').split(',')[0] == '':
return True
return False
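# --- Illustrative examples (added for clarity; not in the original file) ---
# is_empty() treats list-like query-string values such as "[]" or "[ ]" as empty:
#
#     is_empty("[ ]")          -> True  (spaces/brackets stripped, first element is '')
#     is_empty("[read,write]") -> False
#     is_empty(None)           -> False (None is reported as not empty; callers
#                                        handle the None case separately)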
@need_auth
@fn_timer
def add_license(request):
json_licenses = json.loads(request.body)
added_licenses = []
for json_license in json_licenses:
object_license = License()
object_license.from_json(json_license)
if object_license.contains_only_odrl_actions():
if Constraints.is_license_viable(object_license):
object_license = add_license_to_db(object_license)
added_licenses.append(object_license.to_json())
else:
added_licenses.append("Not a valid license: License is non-viable")
else:
added_licenses.append("Not a valid license: Use only ODRL actions")
response = HttpResponse(
json.dumps(added_licenses),
content_type='application/json',
status=201,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
def add_license_to_db(object_license):
neo_license = LicenseModel.nodes.get_or_none(hashed_sets=object_license.hash())
if neo_license:
# update the labels list if needed
neo_license.labels = list(set(object_license.get_labels()).union(neo_license.labels))
neo_license.save()
else:
# license does not exist in the db
license_leaves = get_leaf_licenses()
neo_license = NeoFactory.NeoLicense(object_license)
neo_license.save()
for neo_license_leaf in license_leaves:
object_license_leaf = ObjectFactory.objectLicense(neo_license_leaf)
if object_license.is_preceding(object_license_leaf):
if Constraints.is_compatibility_viable(object_license, object_license_leaf):
neo_license_leaf.precedings.connect(neo_license)
else:
update_licenses_relations_rec(neo_license, object_license, neo_license_leaf, object_license_leaf)
for dataset in object_license.get_datasets():
neo_dataset = DatasetModel.nodes.get_or_none(hashed_uri=dataset.hash())
if not neo_dataset:
neo_dataset = NeoFactory.NeoDataset(dataset)
neo_dataset.save()
neo_license.datasets.connect(neo_dataset)
object_license = ObjectFactory.objectLicense(neo_license)
return object_license
def update_licenses_relations_rec(new_neo_license, new_object_license, neo_license, object_license):
# update precedings and followings of license recursively.
grand_follower = False
for neo_license_following in neo_license.followings:
object_license_following = ObjectFactory.objectLicense(neo_license_following)
if new_object_license.is_following(object_license_following):
# new license is a follower of a following
grand_follower = True
if new_object_license.is_preceding(object_license_following):
if Constraints.is_compatibility_viable(new_object_license, object_license_following):
neo_license_following.precedings.connect(new_neo_license)
if new_object_license.is_following(object_license):
# new_license is between license and its following_license.
if Constraints.is_compatibility_viable(object_license, new_object_license):
neo_license.followings.connect(new_neo_license)
neo_license.followings.disconnect(neo_license_following)
else:
update_licenses_relations_rec(new_neo_license, new_object_license, neo_license_following, object_license_following)
if not grand_follower and new_object_license.is_following(object_license):
# then it's just the next follower of the current license
if Constraints.is_compatibility_viable(object_license, new_object_license):
neo_license.followings.connect(new_neo_license)
@fn_timer
@require_http_methods(['GET'])
def get_compliant(request, hashed_sets):
try:
neo_licenses = get_compatible_licenses(hashed_sets)
compatible_licenses = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
compatible_licenses.append(license_object.to_json())
response = HttpResponse(
json.dumps(compatible_licenses),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
@require_http_methods(['GET'])
def get_compatible(request, hashed_sets):
try:
neo_licenses = get_compliant_licenses(hashed_sets)
compatible_licenses = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
compatible_licenses.append(license_object.to_json())
response = HttpResponse(
json.dumps(compatible_licenses),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
@require_http_methods(['GET'])
def get_graph(request):
nodes = []
links = []
for neo_license in LicenseModel.nodes:
license_object = ObjectFactory.objectLicense(neo_license)
nodes.append(D3jsData.license_node(license_object))
for neo_dataset in neo_license.datasets.all():
dataset_object = ObjectFactory.objectDataset(neo_dataset)
nodes.append(D3jsData.dataset_node(dataset_object))
links.append(D3jsData.dataset_link(license_object, dataset_object))
for compatible_neo_license in neo_license.followings.all():
compatible_license_object = ObjectFactory.objectLicense(compatible_neo_license)
links.append(D3jsData.compatible_link(license_object, compatible_license_object))
response = HttpResponse(
json.dumps(D3jsData.graph(nodes, links)),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
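# --- Illustrative sketch (added; not part of the original file) ---
# These are plain Django function views, so they are presumably exposed through
# a urls.py along these lines. The patterns, names and import path below are
# assumptions for illustration, not the project's actual routing:
#
#     from django.conf.urls import url
#     from views import base_api
#
#     urlpatterns = [
#         url(r'^licenses/?$', base_api.license_path),
#         url(r'^licenses/(?P<hashed_sets>\w+)/?$', base_api.get_license_by_hash),
#         url(r'^datasets/?$', base_api.dataset_path),
#         url(r'^graph/?$', base_api.get_graph),
#     ]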
| from django.http.response import HttpResponse
from django.views.decorators.http import require_http_methods
from neomodel import UniqueProperty, DoesNotExist
import json
from objectmodels.Dataset import Dataset
from objectmodels.License import License
from neomodels import NeoFactory, ObjectFactory
from neomodels.NeoModels import LicenseModel, DatasetModel, license_filter_labels, dataset_filter_search, license_filter_sets, get_leaf_licenses, get_compliant_licenses, get_compatible_licenses
from utils.TimerDecorator import fn_timer
from utils.authentificator import need_auth
from utils import D3jsData
from utils import Constraints
@require_http_methods(['GET', 'POST'])
def license_path(request):
if request.method == 'GET':
return get_licenses(request)
elif request.method == 'POST':
return add_license(request)
@require_http_methods(['GET', 'POST'])
def dataset_path(request):
if request.method == 'GET':
return get_datasets(request)
elif request.method == 'POST':
return add_dataset(request)
def get_licenses(request):
response_content = []
for neo_license in LicenseModel.nodes:
license_object = ObjectFactory.objectLicense(neo_license)
response_content.append(license_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
def get_datasets(request):
response_content = []
for neo_dataset in DatasetModel.nodes:
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response_content.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@need_auth
def add_dataset(request):
json_dataset = json.loads(request.body)
object_dataset = Dataset()
object_dataset.from_json(json_dataset)
neo_dataset = NeoFactory.NeoDataset(object_dataset)
object_dataset = ObjectFactory.objectDataset(neo_dataset)
try:
neo_dataset.save()
response = HttpResponse(
json.dumps(object_dataset.to_json()),
content_type='application/json',
status=201,
)
except UniqueProperty:
response = HttpResponse(
json.dumps(object_dataset.to_json()),
content_type='application/json',
status=409,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def get_license_by_hash(request, hashed_sets):
try:
neo_license = LicenseModel.nodes.get(hashed_sets=hashed_sets)
license_object = ObjectFactory.objectLicense(neo_license)
response = HttpResponse(
json.dumps(license_object.to_json()),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"{}",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def get_dataset_by_hash(request, hashed_uri):
try:
neo_dataset = DatasetModel.nodes.get(hashed_uri=hashed_uri)
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response = HttpResponse(
json.dumps(dataset_object.to_json()),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"{}",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_license_search(request):
query = request.GET.get('query', None)
label = request.GET.get('label', None)
permissions = request.GET.get('permissions', None)
if is_empty(permissions):
permissions = None
obligations = request.GET.get('obligations', None)
if is_empty(obligations):
obligations = None
prohibitions = request.GET.get('prohibitions', None)
if is_empty(prohibitions):
prohibitions = None
neo_licenses = LicenseModel.nodes
if query:
neo_licenses = license_filter_labels(query)
else:
if label:
neo_licenses = license_filter_labels(label)
if permissions:
neo_licenses = license_filter_sets(permissions, 'permissions')
if obligations:
neo_licenses = license_filter_sets(obligations, 'obligations')
if prohibitions:
neo_licenses = license_filter_sets(prohibitions, 'prohibitions')
response_content = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
response_content.append(license_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_dataset_search(request):
query = request.GET.get('query', None)
label = request.GET.get('label', None)
descr = request.GET.get('descr', None)
uri = request.GET.get('uri', None)
neo_datasets = DatasetModel.nodes
if query:
neo_datasets = dataset_filter_search(query)
else:
if label:
neo_datasets = neo_datasets.filter(label__icontains=label)
if uri:
neo_datasets = neo_datasets.filter(uri__icontains=uri)
if descr:
neo_datasets = neo_datasets.filter(description__icontains=descr)
response_content = []
for neo_dataset in neo_datasets:
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response_content.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_datasets_of_licenses(request, hashed_sets):
try:
neo_license = LicenseModel.nodes.get(hashed_sets=hashed_sets)
license_datasets = []
for dataset in neo_license.datasets.all():
dataset_object = ObjectFactory.objectDataset(dataset)
license_datasets.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(license_datasets),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def is_empty(str_list):
if str_list is not None:
if str_list.replace(' ', '').replace('[', '').replace(']', '').split(',')[0] == '':
return True
return False
@need_auth
@fn_timer
def add_license(request):
json_licenses = json.loads(request.body)
added_licenses = []
for json_license in json_licenses:
object_license = License()
object_license.from_json(json_license)
if object_license.contains_only_odrl_actions():
if Constraints.is_license_viable(object_license):
object_license = add_license_to_db(object_license)
added_licenses.append(object_license.to_json())
else:
added_licenses.append("Not a valid license: License is non-viable")
else:
added_licenses.append("Not a valid license: Use only ODRL actions")
response = HttpResponse(
json.dumps(added_licenses),
content_type='application/json',
status=201,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
def add_license_to_db(object_license):
neo_license = LicenseModel.nodes.get_or_none(hashed_sets=object_license.hash())
if neo_license:
# update the labels list if needed
neo_license.labels = list(set(object_license.get_labels()).union(neo_license.labels))
neo_license.save()
else:
# license does not exist in the db
license_leaves = get_leaf_licenses()
neo_license = NeoFactory.NeoLicense(object_license)
neo_license.save()
for neo_license_leaf in license_leaves:
object_license_leaf = ObjectFactory.objectLicense(neo_license_leaf)
if object_license.is_preceding(object_license_leaf):
if Constraints.is_compatibility_viable(object_license, object_license_leaf):
neo_license_leaf.precedings.connect(neo_license)
else:
update_licenses_relations_rec(neo_license, object_license, neo_license_leaf, object_license_leaf)
for dataset in object_license.get_datasets():
neo_dataset = DatasetModel.nodes.get_or_none(hashed_uri=dataset.hash())
if not neo_dataset:
neo_dataset = NeoFactory.NeoDataset(dataset)
neo_dataset.save()
neo_license.datasets.connect(neo_dataset)
object_license = ObjectFactory.objectLicense(neo_license)
return object_license
def update_licenses_relations_rec(new_neo_license, new_object_license, neo_license, object_license):
# update precedings and followings of license recursively.
grand_follower = False
for neo_license_following in neo_license.followings:
object_license_following = ObjectFactory.objectLicense(neo_license_following)
if new_object_license.is_following(object_license_following):
# new license is a follower of a following
grand_follower = True
if new_object_license.is_preceding(object_license_following):
if Constraints.is_compatibility_viable(new_object_license, object_license_following):
neo_license_following.precedings.connect(new_neo_license)
if new_object_license.is_following(object_license):
# new_license is between license and its following_license.
if Constraints.is_compatibility_viable(object_license, new_object_license):
neo_license.followings.connect(new_neo_license)
neo_license.followings.disconnect(neo_license_following)
else:
update_licenses_relations_rec(new_neo_license, new_object_license, neo_license_following, object_license_following)
if not grand_follower and new_object_license.is_following(object_license):
# then it's just the next follower of the current license
if Constraints.is_compatibility_viable(object_license, new_object_license):
neo_license.followings.connect(new_neo_license)
@fn_timer
@require_http_methods(['GET'])
def get_compliant(request, hashed_sets):
try:
neo_licenses = get_compatible_licenses(hashed_sets)
compatible_licenses = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
compatible_licenses.append(license_object.to_json())
response = HttpResponse(
json.dumps(compatible_licenses),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
@require_http_methods(['GET'])
def get_compatible(request, hashed_sets):
try:
neo_licenses = get_compliant_licenses(hashed_sets)
compatible_licenses = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
compatible_licenses.append(license_object.to_json())
response = HttpResponse(
json.dumps(compatible_licenses),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
@require_http_methods(['GET'])
def get_graph(request):
nodes = []
links = []
for neo_license in LicenseModel.nodes:
license_object = ObjectFactory.objectLicense(neo_license)
nodes.append(D3jsData.license_node(license_object))
for neo_dataset in neo_license.datasets.all():
dataset_object = ObjectFactory.objectDataset(neo_dataset)
nodes.append(D3jsData.dataset_node(dataset_object))
links.append(D3jsData.dataset_link(license_object, dataset_object))
for compatible_neo_license in neo_license.followings.all():
compatible_license_object = ObjectFactory.objectLicense(compatible_neo_license)
links.append(D3jsData.compatible_link(license_object, compatible_license_object))
response = HttpResponse(
json.dumps(D3jsData.graph(nodes, links)),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response | en | 0.922455 | # update of labels list if needed # license does not exists in db # update precedings and followings of license recursively. # new license is a follower of a following # new_license is between license and its following_license. # then its just the next follower of the current license | 1.994542 | 2 |
spaceCombat/spaceCombatApp/game/behavior.py | yuriharrison/space-combat | 0 | 6630422 | from kivy.properties import *
from . import utils
class Behavior:
def update(self):
self.apply_effects()
class CollisionDetectionBehavior(Behavior):
def __init__(self, **kw):
self._collision_ls = list()
self.register_event_type('on_collision')
super().__init__(**kw)
def update(self):
self.test_collision_nearby()
super().update()
def on_collision(self, entity, kind):
pass
def test_collision_nearby(self):
if self.nearby_entities:
for entity in self.nearby_entities:
self.test_collision(entity)
def test_collision(self, entity):
kind = None
if self.radius and entity.radius and utils.circle_collision(self, entity):
kind = 'circle'
if utils.rectangle_collision(self, entity):
if kind:
kind = 'both'
else:
kind = 'rectangle'
if kind and entity.id_ not in self._collision_ls:
self.dispatch('on_collision', entity, kind)
self._collision_ls.append(entity.id_)
elif not kind and entity.id_ in self._collision_ls:
self._collision_ls.remove(entity.id_)
class MovableBehavior(Behavior):
pace = NumericProperty(.00005)
speed = NumericProperty(0)
speed_max = NumericProperty(100)
speed_max_backwards = NumericProperty(-100)
front_angle_adjust = NumericProperty(90)
def on_speed(self, instance, value):
if self.speed > self.speed_max:
self.speed = self.speed_max
elif self.speed < self.speed_max_backwards:
self.speed = self.speed_max_backwards
def move_foward(self, distance):
angle = self.angle + self.front_angle_adjust
spos = [float(i) for i in utils.coordinates(distance, angle)]
self.spos_add(*spos)
def update(self):
distance = self.pace*self.speed
self.move_foward(distance)
super().update()
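# Worked example (added note): with the defaults above, pace = 0.00005, so at
# speed = 100 each update() advances the entity by 0.00005 * 100 = 0.005 in
# scaled-position units along its facing direction (angle + front_angle_adjust).
# utils.coordinates() is assumed to convert (distance, angle) into x/y offsets.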
class DamageBehavior(Behavior):
damage = NumericProperty(0)
def hit(self, entity):
pass
class DamageableBehavior(Behavior):
life = NumericProperty(100)
def __init__(self, **kw):
self.bind(on_collision=self._on_collision)
super().__init__(**kw)
def _on_collision(self, i, entity, kind):
if isinstance(entity, DamageBehavior):
self.before_damage()
entity.hit(entity)
self.apply_damage(entity)
self.after_damage()
def apply_damage(self, entity):
self.life -= entity.damage
def before_damage(self):
pass
def after_damage(self):
pass
@property
def is_alive(self):
return self.life > 0
class ShooterBehavior(Behavior):
current_weapon = NumericProperty(None, allownone=True)
weapons = ListProperty(None, allownone=True)
def __init__(self, **kw):
self.register_event_type('on_shoot')
super().__init__(**kw)
def shoot(self):
if self.current_weapon is not None:
projectile = self.weapons[self.current_weapon].shoot(self)
if projectile:
self.dispatch('on_shoot', projectile)
def on_shoot(self, projectile):
pass
def add_weapon(self, weapon):
if not self.weapons:
self.current_weapon = 0
self.weapons.append(weapon)
def remove_weapon(self, weapon):
self.weapons.remove(weapon)
def next_weapon(self):
if self.current_weapon == (len(self.weapons) - 1):
self.current_weapon = 0
else:
self.current_weapon += 1
def previous_weapon(self):
if self.current_weapon == 0:
self.current_weapon = (len(self.weapons) - 1)
else:
self.current_weapon -= 1
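# --- Illustrative sketch (added; not part of the original file) ---
# These classes are written as mixins: they rely on a host entity that supplies
# attributes such as angle, radius, spos_add(), nearby_entities, apply_effects()
# and Kivy's register_event_type()/dispatch(). A ship might combine them like:
#
#     class PlayerShip(CollisionDetectionBehavior, MovableBehavior,
#                      DamageableBehavior, ShooterBehavior, GameEntity):
#         pass
#
# 'GameEntity' is a hypothetical Kivy widget base here; the real project may use
# a different name and base ordering.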
| from kivy.properties import *
from . import utils
class Behavior:
def update(self):
self.apply_effects()
class CollisionDetectionBehavior(Behavior):
def __init__(self, **kw):
self._collision_ls = list()
self.register_event_type('on_collision')
super().__init__(**kw)
def update(self):
self.test_collision_nearby()
super().update()
def on_collision(self, entity, kind):
pass
def test_collision_nearby(self):
if self.nearby_entities:
for entity in self.nearby_entities:
self.test_collision(entity)
def test_collision(self, entity):
kind = None
if self.radius and entity.radius and utils.circle_collision(self, entity):
kind = 'circle'
if utils.rectangle_collision(self, entity):
if kind:
kind = 'both'
else:
kind = 'rectangle'
if kind and entity.id_ not in self._collision_ls:
self.dispatch('on_collision', entity, kind)
self._collision_ls.append(entity.id_)
elif not kind and entity.id_ in self._collision_ls:
self._collision_ls.remove(entity.id_)
class MovableBehavior(Behavior):
pace = NumericProperty(.00005)
speed = NumericProperty(0)
speed_max = NumericProperty(100)
speed_max_backwards = NumericProperty(-100)
front_angle_adjust = NumericProperty(90)
def on_speed(self, instance, value):
if self.speed > self.speed_max:
self.speed = self.speed_max
elif self.speed < self.speed_max_backwards:
self.speed = self.speed_max_backwards
def move_foward(self, distance):
angle = self.angle + self.front_angle_adjust
spos = [float(i) for i in utils.coordinates(distance, angle)]
self.spos_add(*spos)
def update(self):
distance = self.pace*self.speed
self.move_foward(distance)
super().update()
class DamageBehavior(Behavior):
damage = NumericProperty(0)
def hit(self, entity):
pass
class DamageableBehavior(Behavior):
life = NumericProperty(100)
def __init__(self, **kw):
self.bind(on_collision=self._on_collision)
super().__init__(**kw)
def _on_collision(self, i, entity, kind):
if isinstance(entity, DamageBehavior):
self.before_damage()
entity.hit(entity)
self.apply_damage(entity)
self.after_damage()
def apply_damage(self, entity):
self.life -= entity.damage
def before_damage(self):
pass
def after_damage(self):
pass
@property
def is_alive(self):
return self.life > 0
class ShooterBehavior(Behavior):
current_weapon = NumericProperty(None, allownone=True)
weapons = ListProperty(None, allownone=True)
def __init__(self, **kw):
self.register_event_type('on_shoot')
super().__init__(**kw)
def shoot(self):
if self.current_weapon is not None:
projectile = self.weapons[self.current_weapon].shoot(self)
if projectile:
self.dispatch('on_shoot', projectile)
def on_shoot(self, projectile):
pass
def add_weapon(self, weapon):
if not self.weapons:
self.current_weapon = 0
self.weapons.append(weapon)
def remove_weapon(self, weapon):
self.weapons.remove(weapon)
def next_weapon(self):
if self.current_weapon == (len(self.weapons) - 1):
self.current_weapon = 0
else:
self.current_weapon += 1
def previous_weapon(self):
if self.current_weapon == 0:
self.current_weapon = (len(self.weapons) - 1)
else:
self.current_weapon -= 1
| none | 1 | 2.865472 | 3 |
|
CodeIA/venv/Lib/site-packages/coremltools/converters/mil/mil/ops/defs/_utils.py | Finasty-lab/IA-Python | 3 | 6630423 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import math
import coremltools.converters
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from ._op_reqs import *
def broadcast_shapes(shape_x, shape_y):
"""
Check and broadcast given input shapes.
:param shape_x: tuple of int or symbols
Shape of the first tensor (possibly symbolic).
:param shape_y: tuple of int or symbols
Shape of the second tensor (possibly symbolic).
:return: tuple of int or symbols
Result from broadcast.
"""
shape_x = tuple(shape_x)
shape_y = tuple(shape_y)
if len(shape_x) < len(shape_y):
shape_x = tuple([1] * (len(shape_y) - len(shape_x))) + shape_x
if len(shape_y) < len(shape_x):
shape_y = tuple([1] * (len(shape_x) - len(shape_y))) + shape_y
ret_shapes = list()
for i in range(len(shape_x)):
x_unknown = is_symbolic(shape_x[i])
y_unknown = is_symbolic(shape_y[i])
if shape_x[i] == 1:
ret_shapes.append(shape_y[i])
elif shape_y[i] == 1:
ret_shapes.append(shape_x[i])
elif not y_unknown and shape_y[i] > 1:
if not x_unknown and shape_x[i] != shape_y[i]:
raise ValueError(
"Incompatible dim {} in shapes {} vs. {}".format(
i, shape_x, shape_y
)
)
ret_shapes.append(shape_y[i])
elif not x_unknown and shape_x[i] > 1:
if not y_unknown and shape_x[i] != shape_y[i]:
raise ValueError(
"Incompatible dim {} in shapes {} vs. {}".format(
i, shape_x, shape_y
)
)
ret_shapes.append(shape_x[i])
elif x_unknown or y_unknown:
ret_shapes.append(get_new_symbol())
else:
assert shape_x[i] == shape_y[i]
ret_shapes.append(shape_x[i])
return tuple(ret_shapes)
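# Worked example (added note): broadcast_shapes((3, 1, 5), (4, 5)) first pads
# the shorter shape to (1, 4, 5), then keeps the non-1 size on each axis,
# giving (3, 4, 5). Mismatched non-1 sizes, e.g. (3,) vs. (4,), raise ValueError.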
def promoted_primitive_type(type1, type2):
"""
Given a pair of tensor or primitive types, find the smallest type that can store an instance
of their primitive type.
"""
ptype1 = type1.get_primitive() if types.is_tensor(type1) else type1
ptype2 = type2.get_primitive() if types.is_tensor(type2) else type2
return types.promote_types(ptype1, ptype2)
def effective_kernel(kernel_shape, dilations):
"""
Args:
kernel_shape: tuple[int] representing the kernel shape in each
given dimension.
dilations: tuple[int] representing the dilation of the kernel
in each given dimension. Must be the same length as
kernel_shape, and is assumed to give the dimensions in
the same order as kernel_shape
Returns: tuple[int] representing the effective shape of the kernel
in each given dimension, with each dimension in the order given,
taking into account dilation.
See http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions
Note that a dilation of 1 is equivalent to having no dilation.
"""
if len(kernel_shape) != len(dilations):
raise ValueError(
"kernel_shape ({}) and dilations ({}) must be the same length".format(
len(kernel_shape), len(dilations)
)
)
return [(k - 1) * d + 1 for k, d in zip(kernel_shape, dilations)]
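# Worked example (added note): a 3-wide kernel with dilation 2 spans
# (3 - 1) * 2 + 1 = 5 input positions, so effective_kernel([3, 3], [2, 2])
# returns [5, 5]; dilation 1 leaves the kernel size unchanged.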
def aggregated_pad(
pad_type,
kernel_shape,
input_shape=None,
strides=None,
dilations=None,
custom_pad=None,
):
"""
Args
pad_type: string. Must be one of ('same', 'valid', 'custom')
kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels)
input_shape: [iH, iW, ...]: spatial input dims (excluding channels)
Required iff pad_type == 'same'
strides: [sH, sW, ...]: spatial strides (excluding channels)
Required iff pad_type == 'same'
dilations: [dH, dW, ...]: dilations (excluding channels)
If not provided, defaults to [1, 1, ...], effectively no dilation.
custom_pad: Required iff pad_type == 'custom'.
custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding
for spatial dim i.
Returns:
A list of total (before + after) padding for each spatial dimension in kernel_shape.
"""
num_spatial_dims = len(kernel_shape)
if dilations is None:
dilations = [1] * num_spatial_dims
elif len(dilations) != num_spatial_dims:
raise ValueError(
"dilations must have same length as kernel_shape ({}, but got {})".format(
num_spatial_dims, len(dilations)
)
)
if pad_type == "same":
if input_shape is None or len(input_shape) != num_spatial_dims:
raise ValueError(
"For SAME padding input_shape must not be None and must have "
"same length as kernel_shape ({}, but got {})".format(
num_spatial_dims,
len(input_shape) if input_shape is not None else "None",
)
)
if strides is None or len(strides) != num_spatial_dims:
raise ValueError(
"For SAME padding strides must not be None and must have "
"same length as kernel_shape ({}, but got {})".format(
num_spatial_dims, len(strides) if strides is not None else "None"
)
)
effective_ks = effective_kernel(kernel_shape, dilations)
return [
int(max(0, s * math.ceil(float(i) / float(s)) - i + k - s))
if not is_symbolic(i) else get_new_symbol()
for i, k, s in zip(input_shape, effective_ks, strides)
]
if pad_type == "valid":
return [0] * num_spatial_dims
if pad_type == "custom":
if custom_pad is None or len(custom_pad) != 2 * num_spatial_dims:
raise ValueError("Invalid custom_pad.")
return [
custom_pad[2 * d] + custom_pad[2 * d + 1] for d in range(num_spatial_dims)
]
raise ValueError('Invalid padding pad_type "{}"'.format(pad_type))
def spatial_dimensions_out_shape(
pad_type, input_shape, kernel_shape, strides, dilations=None, custom_pad=None
):
"""
Args
pad_type: string. Must be one of ('same', 'valid', 'custom')
input_shape: [iH, iW, ...]: spatial input dims (excluding channels)
Always required for this function.
kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels)
strides: [sH, sW, ...]: spatial strides (excluding channels)
Always required for this function.
dilations: [dH, dW, ...]: dilations (excluding channels)
If not provided, defaults to [1, 1, ...], effectively no dilation.
custom_pad: Required iff pad_type == 'custom'.
custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding
for spatial dim i.
Returns:
A list of spatial output sizes for each spatial dimension of kernel_shape.
"""
num_spatial_dims = len(kernel_shape)
if dilations is None:
dilations = [1] * num_spatial_dims
if custom_pad is None:
custom_pad = [0] * num_spatial_dims * 2
if not (
len(input_shape)
== len(kernel_shape)
== len(strides)
== len(dilations)
== len(custom_pad) / 2
):
raise ValueError(
"input_shape (length {}), kernel_shape (length {}), "
"strides (length {}), dilations (length {}), and "
"custom_pad (length {}) divided by two must all be "
"the same length".format(
len(input_shape),
len(kernel_shape),
len(strides),
len(dilations),
len(custom_pad),
)
)
pad = aggregated_pad(
pad_type=pad_type,
kernel_shape=kernel_shape,
input_shape=input_shape,
strides=strides,
dilations=dilations,
custom_pad=custom_pad,
)
effective_ks = effective_kernel(kernel_shape, dilations)
out_shape = [
(input_shape[r] + pad[r] - effective_ks[r]) // strides[r] + 1
for r in range(num_spatial_dims)
]
return [dim if not is_symbolic(dim) else get_new_symbol() for dim in out_shape]
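# --- Illustrative usage (added for clarity; not in the original file) ---
# Expected values for a small 1-D case with input width 7, kernel 3, stride 2:
#
#     aggregated_pad('same', [3], input_shape=[7], strides=[2])     -> [2]
#         (max(0, 2 * ceil(7/2) - 7 + 3 - 2) = 2)
#     spatial_dimensions_out_shape('same', [7], [3], [2])           -> [4]   # ceil(7/2)
#     spatial_dimensions_out_shape('valid', [7], [3], [2])          -> [3]   # (7 + 0 - 3)//2 + 1
#
# Shown as comments because this module is imported as part of the converter
# package (note the relative imports above) rather than run directly.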
| # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import math
import coremltools.converters
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from ._op_reqs import *
def broadcast_shapes(shape_x, shape_y):
"""
Check and broadcast given input shapes.
:param shape_x: tuple of int or symbols
Shape of the first tensor (possibly symbolic).
:param shape_y: tuple of int or symbols
Shape of the second tensor (possibly symbolic).
:return: tuple of int or symbols
Result from broadcast.
"""
shape_x = tuple(shape_x)
shape_y = tuple(shape_y)
if len(shape_x) < len(shape_y):
shape_x = tuple([1] * (len(shape_y) - len(shape_x))) + shape_x
if len(shape_y) < len(shape_x):
shape_y = tuple([1] * (len(shape_x) - len(shape_y))) + shape_y
ret_shapes = list()
for i in range(len(shape_x)):
x_unknown = is_symbolic(shape_x[i])
y_unknown = is_symbolic(shape_y[i])
if shape_x[i] == 1:
ret_shapes.append(shape_y[i])
elif shape_y[i] == 1:
ret_shapes.append(shape_x[i])
elif not y_unknown and shape_y[i] > 1:
if not x_unknown and shape_x[i] != shape_y[i]:
raise ValueError(
"Incompatible dim {} in shapes {} vs. {}".format(
i, shape_x, shape_y
)
)
ret_shapes.append(shape_y[i])
elif not x_unknown and shape_x[i] > 1:
if not y_unknown and shape_x[i] != shape_y[i]:
raise ValueError(
"Incompatible dim {} in shapes {} vs. {}".format(
i, shape_x, shape_y
)
)
ret_shapes.append(shape_x[i])
elif x_unknown or y_unknown:
ret_shapes.append(get_new_symbol())
else:
assert shape_x[i] == shape_y[i]
ret_shapes.append(shape_x[i])
return tuple(ret_shapes)
def promoted_primitive_type(type1, type2):
"""
Given a pair of tensor or primitive types, find the smallest type that can store an instance
of their primitive type.
"""
ptype1 = type1.get_primitive() if types.is_tensor(type1) else type1
ptype2 = type2.get_primitive() if types.is_tensor(type2) else type2
return types.promote_types(ptype1, ptype2)
def effective_kernel(kernel_shape, dilations):
"""
Args:
kernel_shape: tuple[int] representing the kernel shape in each
given dimension.
dilations: tuple[int] representing the dilation of the kernel
in each given dimension. Must be the same length as
kernel_shape, and is assumed to give the dimensions in
the same order as kernel_shape
Returns: tuple[int] representing the effective shape of the kernel
in each given dimension, with each dimension in the order given,
taking into account dilation.
See http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions
Note that a dilation of 1 is equivalent to having no dilation.
"""
if len(kernel_shape) != len(dilations):
raise ValueError(
"kernel_shape ({}) and dilations ({}) must be the same length".format(
len(kernel_shape), len(dilations)
)
)
return [(k - 1) * d + 1 for k, d in zip(kernel_shape, dilations)]
def aggregated_pad(
pad_type,
kernel_shape,
input_shape=None,
strides=None,
dilations=None,
custom_pad=None,
):
"""
Args
pad_type: string. Must be one of ('same', 'valid', 'custom')
kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels)
input_shape: [iH, iW, ...]: spatial input dims (excluding channels)
Required iff pad_type == 'same'
strides: [sH, sW, ...]: spatial strides (excluding channels)
Required iff pad_type == 'same'
dilations: [dH, dW, ...]: dilations (excluding channels)
If not provided, defaults to [1, 1, ...], effectively no dilation.
custom_pad: Required iff pad_type == 'custom'.
custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding
for spatial dim i.
Returns:
A list of total (before + after) padding for each spatial dimension in kernel_shape.
"""
num_spatial_dims = len(kernel_shape)
if dilations is None:
dilations = [1] * num_spatial_dims
elif len(dilations) != num_spatial_dims:
raise ValueError(
"dilations must have same length as kernel_shape ({}, but got {})".format(
num_spatial_dims, len(dilations)
)
)
if pad_type == "same":
if input_shape is None or len(input_shape) != num_spatial_dims:
raise ValueError(
"For SAME padding input_shape must not be None and must have "
"same length as kernel_shape ({}, but got {})".format(
num_spatial_dims,
len(input_shape) if input_shape is not None else "None",
)
)
if strides is None or len(strides) != num_spatial_dims:
raise ValueError(
"For SAME padding strides must not be None and must have "
"same length as kernel_shape ({}, but got {})".format(
num_spatial_dims, len(strides) if strides is not None else "None"
)
)
effective_ks = effective_kernel(kernel_shape, dilations)
return [
int(max(0, s * math.ceil(float(i) / float(s)) - i + k - s))
if not is_symbolic(i) else get_new_symbol()
for i, k, s in zip(input_shape, effective_ks, strides)
]
if pad_type == "valid":
return [0] * num_spatial_dims
if pad_type == "custom":
if custom_pad is None or len(custom_pad) != 2 * num_spatial_dims:
raise ValueError("Invalid custom_pad.")
return [
custom_pad[2 * d] + custom_pad[2 * d + 1] for d in range(num_spatial_dims)
]
raise ValueError('Invalid padding pad_type "{}"'.format(pad_type))
def spatial_dimensions_out_shape(
pad_type, input_shape, kernel_shape, strides, dilations=None, custom_pad=None
):
"""
Args
pad_type: string. Must be one of ('same', 'valid', 'custom')
input_shape: [iH, iW, ...]: spatial input dims (excluding channels)
Required iff pad_type == 'same'
kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels)
strides: [sH, sW, ...]: spatial strides (excluding channels)
Required iff pad_type == 'same'
dilations: [dH, dW, ...]: dilations (excluding channels)
If not provided, defaults to [1, 1, ...], effectively no dilation.
custom_pad: Required iff pad_type == 'custom'.
custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding
for spatial dim i.
Returns:
A list of spatial output sizes for each spatial dimension of kernel_shape.
"""
num_spatial_dims = len(kernel_shape)
if dilations is None:
dilations = [1] * num_spatial_dims
if custom_pad is None:
custom_pad = [0] * num_spatial_dims * 2
if not (
len(input_shape)
== len(kernel_shape)
== len(strides)
== len(dilations)
== len(custom_pad) / 2
):
raise ValueError(
"input_shape (length {}), kernel_shape (length {}), "
"strides (length {}), dilations (length {}), and "
"custom_pad (length {}) divided by two must all be "
"the same length".format(
len(input_shape),
len(kernel_shape),
len(strides),
len(dilations),
len(custom_pad),
)
)
pad = aggregated_pad(
pad_type=pad_type,
kernel_shape=kernel_shape,
input_shape=input_shape,
strides=strides,
dilations=dilations,
custom_pad=custom_pad,
)
effective_ks = effective_kernel(kernel_shape, dilations)
out_shape = [
(input_shape[r] + pad[r] - effective_ks[r]) // strides[r] + 1
for r in range(num_spatial_dims)
]
return [dim if not is_symbolic(dim) else get_new_symbol() for dim in out_shape]
| en | 0.772015 | # Copyright (c) 2020, Apple Inc. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause Check and broadcast given input shapes. :param shape_x: tuple of int or symbols Shape of the first tensor (possibly symbolic). :param shape_y: tuple of int or symbols Shape of the second tensor (possibly symbolic). :return: tuple of int or symbols Result from broadcast. Given a pair of tensor or primitive types, find the smallest type that can store an instance of their primitive type. Args: kernel_shape: tuple[int] representing the kernel shape in each given dimension. dilations: tuple[int] representing the dilation of the kernel in each given dimension. Must be the same length as kernel_shape, and is assumed to give the dimensions in the same order as kernel_shape Returns: tuple[int] representing the effective shape of the kernel in each given dimension, with each dimension in the order given, taking into account dilation. See http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions Note that a dilation of 1 is equivalent to having no dilation. Args pad_type: string. Must be one of ('same', 'valid', 'custom') kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels) input_shape: [iH, iW, ...]: spatial input dims (excluding channels) Required iff pad_type == 'same' strides: [sH, sW, ...]: spatial strides (excluding channels) Required iff pad_type == 'same' dilations: [dH, dW, ...]: dilations (excluding channels) If not provided, defaults to [1, 1, ...], effectively no dilation. custom_pad: Required iff pad_type == 'custom'. custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding for spatial dim i. Returns: A list of total (before + after) padding for each spatial dimension in kernel_shape. Args pad_type: string. Must be one of ('same', 'valid', 'custom') input_shape: [iH, iW, ...]: spatial input dims (excluding channels) Required iff pad_type == 'same' kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels) strides: [sH, sW, ...]: spatial strides (excluding channels) Required iff pad_type == 'same' dilations: [dH, dW, ...]: dilations (excluding channels) If not provided, defaults to [1, 1, ...], effectively no dilation. custom_pad: Required iff pad_type == 'custom'. custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding for spatial dim i. Returns: A list of spatial output sizes for each spatial dimension of kernel_shape. | 2.344327 | 2 |
Doc/sphinx-examples/tutorial/going_further/src/add-trigger.py | mpartio/ecflow | 11 | 6630424 | <reponame>mpartio/ecflow
#!/usr/bin/env python2.7
import os
import ecflow
def create_family_f1():
f1 = ecflow.Family("f1")
f1.add_variable("SLEEP", 20)
f1.add_task("t1")
f1.add_task("t2").add_trigger("t1 eq complete")
return f1
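# Added note: the trigger above makes t2 wait until t1 has completed, so the two
# tasks run in sequence rather than in parallel. A further dependent task could
# be chained the same way (illustrative, not in the original tutorial script):
#     f1.add_task("t3").add_trigger("t2 eq complete")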
print "Creating suite definition"
defs = ecflow.Defs()
suite = defs.add_suite("test")
suite.add_variable("ECF_INCLUDE", os.getenv("HOME") + "/course")
suite.add_variable("ECF_HOME", os.getenv("HOME") + "/course")
suite.add_family( create_family_f1() )
print defs
print "Checking job creation: .ecf -> .job0"
print defs.check_job_creation()
print "Checking trigger expressions"
print defs.check()
print "Saving definition to file 'test.def'"
defs.save_as_defs("test.def") | #!/usr/bin/env python2.7
import os
import ecflow
def create_family_f1():
f1 = ecflow.Family("f1")
f1.add_variable("SLEEP", 20)
f1.add_task("t1")
f1.add_task("t2").add_trigger("t1 eq complete")
return f1
print "Creating suite definition"
defs = ecflow.Defs()
suite = defs.add_suite("test")
suite.add_variable("ECF_INCLUDE", os.getenv("HOME") + "/course")
suite.add_variable("ECF_HOME", os.getenv("HOME") + "/course")
suite.add_family( create_family_f1() )
print defs
print "Checking job creation: .ecf -> .job0"
print defs.check_job_creation()
print "Checking trigger expressions"
print defs.check()
print "Saving definition to file 'test.def'"
defs.save_as_defs("test.def") | ru | 0.174408 | #!/usr/bin/env python2.7 | 2.490099 | 2 |
project/project/routers.py | Gerhut/django-read-write-split | 0 | 6630425 | import random
class MasterSlaveRouter(object):
def db_for_read(self, model, **hints):
return random.choice(('slave1', 'slave2'))
def db_for_write(self, model, **hints):
return 'master'
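# --- Illustrative note (added; not part of the original file) ---
# For this router to take effect, the Django settings would need the three
# database aliases it refers to and the router registered, roughly:
#
#     DATABASES = {'default': ..., 'master': ..., 'slave1': ..., 'slave2': ...}
#     DATABASE_ROUTERS = ['project.routers.MasterSlaveRouter']
#
# The alias names and dotted module path are assumptions based on this file's
# location, not verified project settings.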
| import random
class MasterSlaveRouter(object):
def db_for_read(self, model, **hints):
return random.choice(('slave1', 'slave2'))
def db_for_write(self, model, **hints):
return 'master'
| none | 1 | 2.769305 | 3 |
|
release/stubs.min/Rhino/Geometry/__init___parts/TextEntity.py | htlcnn/ironpython-stubs | 182 | 6630426 | class TextEntity(AnnotationBase,IDisposable,ISerializable):
"""
Represents text geometry.
This class refers to the geometric element that is independent from the document.
TextEntity()
"""
def ConstructConstObject(self,*args):
"""
ConstructConstObject(self: CommonObject,parentObject: object,subobject_index: int)
Assigns a parent object and a subobject index to this.
parentObject: The parent object.
subobject_index: The subobject index.
"""
pass
def Dispose(self):
"""
Dispose(self: CommonObject,disposing: bool)
For derived class implementers.
This method is called with argument true when class
user calls Dispose(),while with argument false when
the Garbage Collector invokes
the finalizer,or Finalize() method.You must reclaim all used unmanaged resources in both cases,
and can use this chance to call Dispose on disposable fields if the argument is true.Also,you
must call the base virtual method within your overriding method.
disposing: true if the call comes from the Dispose() method; false if it comes from the Garbage Collector
finalizer.
"""
pass
def Explode(self):
"""
Explode(self: TextEntity) -> Array[Curve]
Explodes this text entity into an array of curves.
Returns: An array of curves that forms the outline or content of this text entity.
"""
pass
def NonConstOperation(self,*args):
"""
NonConstOperation(self: CommonObject)
For derived classes implementers.
Defines the necessary implementation to free the
instance from being const.
"""
pass
def OnSwitchToNonConst(self,*args):
"""
OnSwitchToNonConst(self: GeometryBase)
Is called when a non-const operation occurs.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self):
"""
__new__(cls: type)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
"""
pass
def __reduce_ex__(self,*args):
pass
AnnotativeScalingEnabled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Scale annotation according to detail scale factor in paperspace
or by 1.0 in paperspace and not in a detail
Otherwise,dimscale or text scale is used
Get: AnnotativeScalingEnabled(self: TextEntity) -> bool
Set: AnnotativeScalingEnabled(self: TextEntity)=value
"""
FontIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the index of font in document font table used by the text.
Get: FontIndex(self: TextEntity) -> int
Set: FontIndex(self: TextEntity)=value
"""
Justification=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the justification of text in relation to its base point.
Get: Justification(self: TextEntity) -> TextJustification
Set: Justification(self: TextEntity)=value
"""
MaskColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Color to use for drawing a text mask when it is enabled. If the mask is
enabled and MaskColor is System.Drawing.Color.Transparent,then the
viewport's color will be used for the MaskColor
Get: MaskColor(self: TextEntity) -> Color
Set: MaskColor(self: TextEntity)=value
"""
MaskEnabled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determines whether or not to draw a Text Mask
Get: MaskEnabled(self: TextEntity) -> bool
Set: MaskEnabled(self: TextEntity)=value
"""
MaskOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""distance around text to display mask
Get: MaskOffset(self: TextEntity) -> float
Set: MaskOffset(self: TextEntity)=value
"""
MaskUsesViewportColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""If true,the viewport's color is used for the mask color. If
false,the color defined by MaskColor is used
Get: MaskUsesViewportColor(self: TextEntity) -> bool
Set: MaskUsesViewportColor(self: TextEntity)=value
"""
| class TextEntity(AnnotationBase,IDisposable,ISerializable):
"""
Represents text geometry.
This class refers to the geometric element that is independent from the document.
TextEntity()
"""
def ConstructConstObject(self,*args):
"""
ConstructConstObject(self: CommonObject,parentObject: object,subobject_index: int)
Assigns a parent object and a subobject index to this.
parentObject: The parent object.
subobject_index: The subobject index.
"""
pass
def Dispose(self):
"""
Dispose(self: CommonObject,disposing: bool)
For derived class implementers.
This method is called with argument true when class
user calls Dispose(),while with argument false when
the Garbage Collector invokes
the finalizer,or Finalize() method.You must reclaim all used unmanaged resources in both cases,
and can use this chance to call Dispose on disposable fields if the argument is true.Also,you
must call the base virtual method within your overriding method.
disposing: true if the call comes from the Dispose() method; false if it comes from the Garbage Collector
finalizer.
"""
pass
def Explode(self):
"""
Explode(self: TextEntity) -> Array[Curve]
Explodes this text entity into an array of curves.
Returns: An array of curves that forms the outline or content of this text entity.
"""
pass
def NonConstOperation(self,*args):
"""
NonConstOperation(self: CommonObject)
For derived classes implementers.
Defines the necessary implementation to free the
instance from being const.
"""
pass
def OnSwitchToNonConst(self,*args):
"""
OnSwitchToNonConst(self: GeometryBase)
Is called when a non-const operation occurs.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self):
"""
__new__(cls: type)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
"""
pass
def __reduce_ex__(self,*args):
pass
AnnotativeScalingEnabled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Scale annotation according to detail scale factor in paperspace
or by 1.0 in paperspace and not in a detail
Otherwise,dimscale or text scale is used
Get: AnnotativeScalingEnabled(self: TextEntity) -> bool
Set: AnnotativeScalingEnabled(self: TextEntity)=value
"""
FontIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the index of font in document font table used by the text.
Get: FontIndex(self: TextEntity) -> int
Set: FontIndex(self: TextEntity)=value
"""
Justification=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the justification of text in relation to its base point.
Get: Justification(self: TextEntity) -> TextJustification
Set: Justification(self: TextEntity)=value
"""
MaskColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Color to use for drawing a text mask when it is enabled. If the mask is
enabled and MaskColor is System.Drawing.Color.Transparent,then the
viewport's color will be used for the MaskColor
Get: MaskColor(self: TextEntity) -> Color
Set: MaskColor(self: TextEntity)=value
"""
MaskEnabled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determines whether or not to draw a Text Mask
Get: MaskEnabled(self: TextEntity) -> bool
Set: MaskEnabled(self: TextEntity)=value
"""
MaskOffset=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""distance around text to display mask
Get: MaskOffset(self: TextEntity) -> float
Set: MaskOffset(self: TextEntity)=value
"""
MaskUsesViewportColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""If true,the viewport's color is used for the mask color. If
false,the color defined by MaskColor is used
Get: MaskUsesViewportColor(self: TextEntity) -> bool
Set: MaskUsesViewportColor(self: TextEntity)=value
"""
| en | 0.613244 | Represents text geometry.
This class refers to the geometric element that is independent from the document.
TextEntity() ConstructConstObject(self: CommonObject,parentObject: object,subobject_index: int)
Assigns a parent object and a subobject index to this.
parentObject: The parent object.
subobject_index: The subobject index. Dispose(self: CommonObject,disposing: bool)
For derived class implementers.
This method is called with argument true when class
user calls Dispose(),while with argument false when
the Garbage Collector invokes
the finalizer,or Finalize() method.You must reclaim all used unmanaged resources in both cases,
and can use this chance to call Dispose on disposable fields if the argument is true.Also,you
must call the base virtual method within your overriding method.
disposing: true if the call comes from the Dispose() method; false if it comes from the Garbage Collector
finalizer. Explode(self: TextEntity) -> Array[Curve]
Explodes this text entity into an array of curves.
Returns: An array of curves that forms the outline or content of this text entity. NonConstOperation(self: CommonObject)
For derived classes implementers.
Defines the necessary implementation to free the
instance from being const. OnSwitchToNonConst(self: GeometryBase)
Is called when a non-const operation occurs. __enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable. __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable. x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature __new__(cls: type)
__new__(cls: type,info: SerializationInfo,context: StreamingContext) Scale annotation according to detail scale factor in paperspace
or by 1.0 in paperspace and not in a detail
Otherwise,dimscale or text scale is used
Get: AnnotativeScalingEnabled(self: TextEntity) -> bool
Set: AnnotativeScalingEnabled(self: TextEntity)=value Gets or sets the index of font in document font table used by the text.
Get: FontIndex(self: TextEntity) -> int
Set: FontIndex(self: TextEntity)=value Gets or sets the justification of text in relation to its base point.
Get: Justification(self: TextEntity) -> TextJustification
Set: Justification(self: TextEntity)=value Color to use for drawing a text mask when it is enabled. If the mask is
enabled and MaskColor is System.Drawing.Color.Transparent,then the
viewport's color will be used for the MaskColor
Get: MaskColor(self: TextEntity) -> Color
Set: MaskColor(self: TextEntity)=value Determines whether or not to draw a Text Mask
Get: MaskEnabled(self: TextEntity) -> bool
Set: MaskEnabled(self: TextEntity)=value distance around text to display mask
Get: MaskOffset(self: TextEntity) -> float
Set: MaskOffset(self: TextEntity)=value If true,the viewport's color is used for the mask color. If
false,the color defined by MaskColor is used
Get: MaskUsesViewportColor(self: TextEntity) -> bool
Set: MaskUsesViewportColor(self: TextEntity)=value | 2.402431 | 2 |
scripts/perf/alltime.py | mhbliao/rocFFT | 0 | 6630427 | #!/usr/bin/python3
import sys, getopt
import numpy as np
from math import *
import subprocess
import os
import re # regexp package
import shutil
import tempfile
usage = '''A timing script for rocFFT that generates a lot of data
Usage:
\talltime.py
\t\t-b Specify binary for dload executable (optional)
\t\t-i Append to list of binary directories (appendable)
\t\t-o Specify output directories for raw data
\t\t appendable; defaults to "dir0", "dir1", etc.
\t\t-l Specify labels for runs
\t\t appendable; defaults to "dir0", "dir1", etc.
\t\t-w output directory for graphs and final document
\t\t-S plot speedup (default: 1, disabled: 0)
\t\t-t data type: time (default) or gflops or roofline
\t\t-y secondary axis type: none or gflops
\t\t-s short run
\t\t-T do not perform FFTs; just generate document
\t\t-f document format: pdf (default) or docx
\t\t-g generate graphs via Asymptote: 0(default) or 1
\t\t-d device number (default: 0)
\t\t-N Number of samples (default: 10)
\t\t-D dims to test. default: 1,2,3
\t\t-R runtype: report, benchmark, or efficiency
'''
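# Example invocation (illustrative only; the build paths and labels below are
# hypothetical, not part of this script):
#   ./alltime.py -i /path/to/rocfft-develop/build/clients/staging -l develop \
#                -i /path/to/rocfft-change/build/clients/staging -l change \
#                -w doc -t gflops -N 20 -D 1,2
# This times rocfft-rider from both build trees, labels the runs "develop" and
# "change", and writes the figures plus a PDF summary into ./doc.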
def nextpow(val, radix):
x = 1
while(x <= val):
x *= radix
return x
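# For example, nextpow(1000, 2) returns 1024 and nextpow(81, 3) returns 243: the
# result is the smallest power of `radix` that is strictly greater than `val`.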
# A class for generating data for figures.
class rundata:
def __init__(self, label,
dimension, minsize, maxsize, nbatch, radix, ratio, ffttype,
direction, inplace):
self.dimension = dimension
self.minsize = minsize
self.maxsize = maxsize
self.nbatch = nbatch
self.radix = radix
self.ratio = ratio
self.ffttype = ffttype
self.precision = "double"
self.inplace = inplace
self.direction = direction
self.label = label
def outfilename(self, odir):
outfile = ""
outfile += "radix" + str(self.radix)
outfile += "_dim" + str(self.dimension)
outfile += "_" + self.precision
outfile += "_n" + str(self.nbatch)
if self.direction == 1:
outfile += "_inv"
if self.dimension > 1:
outfile += "_ratio" + "_" + str(self.ratio[0])
if self.dimension > 2:
outfile += "_" + str(self.ratio[1])
outfile += "_" + self.ffttype
if self.inplace:
outfile += "_inplace"
else:
outfile += "_outofplace"
outfile += ".dat"
outfile = os.path.join(odir, outfile)
return outfile
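    # A sketch of the naming scheme (illustration): a forward, double-precision,
    # radix-2, 1D c2c run with batch size 1, done out-of-place, is written to
    #   <odir>/radix2_dim1_double_n1_c2c_outofplace.dat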
def runcmd(self, nsample, indirlist, outdirlist, dloadexe):
cmd = [os.path.join(sys.path[0],"timing.py")]
if dloadexe == None:
# When not using dload, we just have one input and output dir.
cmd.append("-w")
cmd.append(os.path.abspath(indirlist[0]))
cmd.append("-o")
cmd.append(self.outfilename(outdirlist[0]))
else:
cmd.append("-w")
cmd.append(dloadexe)
for indir in indirlist:
cmd.append("-i")
cmd.append(indir)
for outdir in outdirlist:
cmd.append("-o")
cmd.append(self.outfilename(outdir))
cmd.append("-N")
cmd.append(str(nsample))
cmd.append("-b")
cmd.append(str(self.nbatch))
cmd.append("-x")
cmd.append(str(self.minsize))
cmd.append("-X")
cmd.append(str(self.maxsize))
if self.dimension > 1:
cmd.append("-y")
cmd.append(str(self.minsize * self.ratio[0]))
cmd.append("-Y")
cmd.append(str(self.maxsize * self.ratio[0]))
if self.dimension > 2:
cmd.append("-z")
cmd.append(str(self.minsize * self.ratio[1]))
cmd.append("-Z")
cmd.append(str(self.maxsize * self.ratio[1]))
cmd.append("-r")
cmd.append(str(self.radix))
cmd.append("-D")
cmd.append(str(self.direction))
cmd.append("-d")
cmd.append(str(self.dimension))
cmd.append("-f")
cmd.append(self.precision)
if self.ffttype == "r2c":
cmd.append("-R")
return cmd
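    # With dload disabled, the assembled command looks roughly like the following
    # (paths abbreviated; shown for illustration only):
    #   timing.py -w <rider dir> -o <outdir>/radix2_dim1_double_n1_c2c_outofplace.dat \
    #             -N 10 -b 1 -x 1024 -X 536870912 -r 2 -D -1 -d 1 -f double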
def executerun(self, nsample, indirlist, outdirlist, dloadexe):
fout = tempfile.TemporaryFile(mode="w+")
ferr = tempfile.TemporaryFile(mode="w+")
if dloadexe != None:
cmd = self.runcmd(nsample, indirlist, outdirlist, dloadexe)
print(" ".join(cmd))
proc = subprocess.Popen(cmd,
stdout=fout, stderr=ferr,
env=os.environ.copy())
# FIXME: copy log to multiple outputs?
proc.wait()
rc = proc.returncode
if rc != 0:
print("****fail****")
else:
for idx in range(min(len(indirlist), len(outdirlist))):
print(idx, ":", indirlist[idx], "->", outdirlist[idx], flush=True)
cmd = self.runcmd(nsample, [indirlist[idx]], [outdirlist[idx]], None)
print(" ".join(cmd))
proc = subprocess.Popen(cmd,
stdout=fout, stderr=ferr,
env=os.environ.copy())
proc.wait()
rc = proc.returncode
if rc != 0:
print("****fail****")
return 0
# Figure class, which contains runs and provides commands to generate figures.
class figure:
def __init__(self, name, caption):
self.name = name
self.runs = []
self.caption = caption
def inputfiles(self, outdirlist):
import os
files = []
for run in self.runs:
for outdir in outdirlist:
files.append(run.outfilename(outdir))
print(files)
return files
def labels(self, labellist):
labels = []
for run in self.runs:
for label in labellist:
labels.append(label + run.label)
return labels
def filename(self, outdir, docformat):
outfigure = self.name
outfigure += ".pdf"
# if docformat == "pdf":
# outfigure += ".pdf"
# if docformat == "docx":
# outfigure += ".png"
return os.path.join(outdir, outfigure)
def asycmd(self, docdir, outdirlist, labellist, docformat, datatype, ncompare, secondtype, just1dc2crad2):
asycmd = ["asy"]
asycmd.append("-f")
asycmd.append("pdf")
# if docformat == "pdf":
# asycmd.append("-f")
# asycmd.append("pdf")
# if docformat == "docx":
# asycmd.append("-f")
# asycmd.append("png")
# asycmd.append("-render")
# asycmd.append("8")
asycmd.append(os.path.join(sys.path[0],"datagraphs.asy"))
asycmd.append("-u")
inputfiles = self.inputfiles(outdirlist)
asycmd.append('filenames="' + ",".join(inputfiles) + '"')
asycmd.append("-u")
labels = self.labels(labellist)
asycmd.append('legendlist="' + ",".join(labels) + '"')
asycmd.append("-u")
asycmd.append('speedup=' + str(ncompare))
if just1dc2crad2 :
asycmd.append("-u")
asycmd.append('just1dc2crad2=true')
if secondtype == "gflops":
asycmd.append("-u")
asycmd.append('secondarygflops=true')
if datatype == "gflops":
asycmd.append("-u")
asycmd.append('primaryaxis="gflops"')
if datatype == "roofline":
asycmd.append("-u")
asycmd.append('primaryaxis="roofline"')
# roofline on multiple devices doesn't really make sense; just use the first device
with open(os.path.join(outdirlist[0], "gpuid.txt"), "r") as f:
gpuid = f.read()
asycmd.append("-u")
asycmd.append('gpuid="' + gpuid.strip() + '"')
if len(self.runs) > 0:
asycmd.append("-u")
asycmd.append('batchsize=' + str(self.runs[0].nbatch))
asycmd.append("-u")
asycmd.append('problemdim=' + str(self.runs[0].dimension))
asycmd.append("-u")
val = 1
for rat in self.runs[0].ratio:
val *= rat
asycmd.append('problemratio=' + str(val))
asycmd.append("-u")
if self.runs[0].ffttype == "r2c":
asycmd.append("realcomplex=true")
else:
asycmd.append("realcomplex=false")
asycmd.append("-o")
asycmd.append(self.filename(docdir, docformat) )
return asycmd
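    # The assembled Asymptote command has the shape (illustration only):
    #   asy -f pdf datagraphs.asy -u 'filenames="..."' -u 'legendlist="..."' \
    #       -u speedup=<ncompare> [...] -o <docdir>/<figure name>.pdf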
def executeasy(self, docdir, outdirs, labellist, docformat, datatype, ncompare, secondtype,
just1dc2crad2):
fout = tempfile.TemporaryFile(mode="w+")
ferr = tempfile.TemporaryFile(mode="w+")
asyproc = subprocess.Popen(self.asycmd(docdir, outdirs, labellist,
docformat, datatype, ncompare, secondtype,
just1dc2crad2),
stdout=fout, stderr=ferr, env=os.environ.copy(),
cwd = sys.path[0])
asyproc.wait()
asyrc = asyproc.returncode
if asyrc != 0:
print("****asy fail****")
fout.seek(0)
cout = fout.read()
print(cout)
ferr.seek(0)
cerr = ferr.read()
print(cerr)
return asyrc
# Function for generating figures for benchmark output
def benchfigs(rundims, shortrun):
figs = []
# FFT directions
forwards = -1
backwards = 1
if 1 in rundims:
dimension = 1
nbatch = 1
min1d = 256 if shortrun else 1024
max1d = 4000 if shortrun else 536870912
for inplace in [True, False]:
fig = figure("1d_c2c" + ("inplace" if inplace else "outofplace"),
"1D complex transforms " + ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "c2c", forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("1d_r2c" + ("inplace" if inplace else "outofplace")
, "1D real-to-complex transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "r2c", forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("1d_c2r" + ("inplace" if inplace else "outofplace"),
"1D complex-to-real transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix) ,
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "r2c", backwards, inplace) )
figs.append(fig)
if 2 in rundims:
dimension = 2
nbatch = 1
min2d = 64 if shortrun else 128
max2d = 8192 if shortrun else 32768
for inplace in [True, False]:
fig = figure("2d_c2c" + ("inplace" if inplace else "outofplace"),
"2D complex transforms " + ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min2d, radix), max2d, nbatch, radix, [1],
"c2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("2d_r2c" + ("inplace" if inplace else "outofplace"),
"2D real-to-complex transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min2d, radix), max2d, nbatch, radix, [1],
"r2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("2d_c2r" + ("inplace" if inplace else "outofplace"),
"2D complex-to-real transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min2d, radix), max2d, nbatch, radix, [1],
"r2c",
backwards, inplace) )
figs.append(fig)
if 3 in rundims:
dimension = 3
min3d = 16
max3d = 128 if shortrun else 1024
nbatch = 1
for inplace in [True]:
fig = figure("3d_c2c" + ("inplace" if inplace else "outofplace"),
"3D complex transforms " + ("in-place" if inplace else "out-of-place"))
for radix in [2, 3, 5]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min3d, radix), max3d, nbatch, radix, [1,1],
"c2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("3d_r2c" + ("inplace" if inplace else "outofplace")
, "3D real-to-complex transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min3d, radix), max3d, nbatch, radix, [1,1],
"r2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("3d_c2r" + ("inplace" if inplace else "outofplace"),
"3D complex-to-real transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min3d, radix), max3d, nbatch, radix, [1,1],
"r2c",
backwards, inplace) )
figs.append(fig)
return figs
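# As written, benchfigs covers c2c, r2c, and c2r transforms in 1D, 2D, and 3D
# (3D c2c in-place only; radices 2 and 3, plus radix 5 for 3D c2c), all with
# batch size 1.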
def efficiencyfigs(rundims, shortrun):
figs = []
# FFT directions
forwards = -1
backwards = 1
inplace = True
dimension = 1
radix = 2
min1d = 1024
max1d = 1048576 if shortrun else 268435456 #pow(2,28) gives a floating type :(
nbatch = 1
while max1d > min1d:
fig = figure("1d_c2c_batch" + str(nbatch) + "_radix" + str(radix),
"1D complex transforms " + ("in-place" if inplace else "out-of-place") + " radix " + str(radix) + " batch " + str(nbatch) )
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "c2c", forwards, inplace) )
figs.append(fig)
nbatch *= 2
max1d //= 2
min1d //= 2
        min1d = max(min1d, 2**5)  # ** is exponentiation; ^ would be bitwise XOR
return figs
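# Note that the sweep above doubles the batch size while halving the maximum 1D
# length each iteration, so successive figures probe roughly the same total
# problem size with different batch/length trade-offs.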
# Function for generating figures for a performance report
def reportfigs(rundims, shortrun):
figs = []
# FFT directions
forwards = -1
backwards = 1
inplace = True
if 1 in rundims:
dimension = 1
for min1d, max1d, nbatch in [[1024,536870912,1], [8,32768,100000]]:
for radix in [2, 3, 5, 7]:
fig = figure("1d_c2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"1D complex transforms with radix " + str(radix)\
+ " and batch size " + str(nbatch) + "." )
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix),
max1d, nbatch,
radix, [], "c2c", forwards,
inplace) )
figs.append(fig)
for radix in [2, 3, 5, 7]:
fig = figure("1d_r2c"\
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"1D real-to-complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix),
max1d, nbatch,
radix, [], "r2c", forwards,
inplace) )
figs.append(fig)
for radix in [2, 3, 5, 7]:
fig = figure("1d_c2r" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"1D complex-to-real transforms with radix " \
+ str(radix) \
+ " and batch size " + str(nbatch) + "." )
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix),
max1d, nbatch,
radix, [], "r2c", backwards,
inplace) )
figs.append(fig)
if 2 in rundims:
dimension = 2
for min2d, max2d, nbatch in [[128,32768,1], [64,8192,100]]:
for radix in [2, 3, 5]:
fig = figure("2d_c2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch) ,
"2D complex transforms with radix " + str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata( "radix "+ str(radix),
dimension,
nextpow(min2d, radix), max2d,
nbatch,
radix, [1], "c2c",
forwards, inplace) )
figs.append(fig)
for radix in [2, 3, 5]:
fig = figure("2d_r2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D real-to-complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata( "radix " + str(radix),
dimension,
nextpow(min2d, radix), max2d,
nbatch,
radix, [1], "r2c",
forwards, inplace) )
figs.append(fig)
for radix in [2, 3, 5]:
fig = figure("2d_c2r" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D complex-to-real transforms with radix "\
+ str(radix) +\
" and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min2d, radix), max2d,
nbatch,
radix, [1], "r2c",
backwards, inplace) )
figs.append(fig)
for radix in [2]:
fig = figure("2d_c2c_r2" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D complex transforms "\
+ "with aspect ratio N:2N with radix "\
+ str(radix) + " and batch size " + str(nbatch) \
+ ".")
fig.runs.append( rundata( "radix 2",
dimension, min2d, max2d, nbatch, 2,
[2], "c2c",
forwards, inplace) )
figs.append(fig)
for radix in [2]:
fig = figure("2d_r2c_r2" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D real-to-complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix 2",
dimension, min2d, max2d, nbatch, 2,
[2], "r2c",
forwards, inplace) )
figs.append(fig)
if 3 in rundims:
dimension = 3
for min3d, max3d, nbatch in [[16,128,1],[4,64,100]]:
for radix in [2, 3, 5]:
fig = figure("3d_c2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min3d, radix), max3d,
nbatch,
radix, [1,1], "c2c",
forwards, inplace) )
figs.append(fig)
for radix in [2, 3]:
fig = figure("3d_r2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D real-to-complex transforms with radix "\
+ str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min3d, radix), max3d,
nbatch,
radix, [1,1], "r2c",
forwards, inplace) )
figs.append(fig)
fig = figure("3d_c2r" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D complex-to-real transforms with radix "\
+ str(radix)
+ " and batch size " + str(nbatch) + ".")
for radix in [2]:
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min3d, radix), max3d,
nbatch,
radix, [1,1], "r2c",
backwards, inplace) )
figs.append(fig)
fig = figure("3d_c2c_aspect" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D complex transforms "\
+ "with aspect ratio N:N:16N with radix "\
+ str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix 2",
dimension, min3d, max3d, nbatch, 2,
[1,16], "c2c",
forwards, inplace) )
figs.append(fig)
fig = figure("3d_r2c_aspect" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D real-to-complex transforms " \
+ "with aspect ratio N:N:16N with radix " \
+ str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix 2",
dimension, min3d, max3d, nbatch, 2,
[1,16], "r2c",
forwards, inplace) )
figs.append(fig)
return figs
def main(argv):
dloadexe = None
indirlist = []
outdirlist = []
labellist = []
docdir = "doc"
dryrun = False
nbatch = 1
speedup = True
datatype = "time"
shortrun = False
docformat = "pdf"
devicenum = 0
doAsy = True
nsample = 10
rundims = [1,2,3]
runtype = "benchmark"
secondtype = "none"
try:
opts, args = getopt.getopt(argv,"hb:D:f:Tt:i:o:l:S:sg:d:N:R:w:y:")
except getopt.GetoptError:
print("error in parsing arguments.")
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h"):
print(usage)
exit(0)
elif opt in ("-b"):
dloadexe = os.path.abspath(arg)
elif opt in ("-i"):
indirlist.append(arg)
elif opt in ("-i"):
indirlist.append(arg)
elif opt in ("-o"):
outdirlist.append(arg)
elif opt in ("-l"):
labellist.append(arg)
elif opt in ("-w"):
docdir = arg
elif opt in ("-T"):
dryrun = True
elif opt in ("-s"):
shortrun = True
elif opt in ("-g"):
if int(arg) == 0:
doAsy = False
if int(arg) == 1:
doAsy = True
elif opt in ("-d"):
devicenum = int(arg)
elif opt in ("-D"):
rundims = []
for val in arg.split(','):
rundims.append(int(val))
elif opt in ("-N"):
nsample = int(arg)
elif opt in ("-S"):
if int(arg) == 0:
speedup = False
if int(arg) == 1:
speedup = True
elif opt in ("-t"):
if arg not in ["time", "gflops", "roofline"]:
print("data type must be time or gflops or roofline")
print(usage)
sys.exit(1)
datatype = arg
elif opt in ("-y"):
if arg not in ["none", "gflops"]:
print("data type must be gflops or none")
print(usage)
sys.exit(1)
secondtype = arg
elif opt in ("-R"):
if arg not in ["report", "benchmark", "efficiency"]:
print("data type must be gflops or none")
print(usage)
sys.exit(1)
runtype = arg
if runtype == "efficiency":
datatype = "roofline"
elif opt in ("-f"):
goodvals = ["pdf", "docx"]
if arg not in goodvals:
print("error: format must in " + " ".join(goodvals))
print(usage)
sys.exit(1)
docformat = arg
print("rundims:")
print(rundims)
if not dryrun:
if dloadexe == None:
for indir in indirlist:
if not binaryisok(indir, "rocfft-rider"):
print("unable to find " + "rocfft-rider" + " in " + indir)
print("please specify with -i")
sys.exit(1)
else:
if not binaryisok(dloadexe, "dyna-rocfft-rider"):
print("unable to find " + "dyna-rocfft-rider" + " in " + dloadexe)
for indir in indirlist:
if not binaryisok(indir, "librocfft.so"):
print("unable to find " + "librocfft.so" + " in " + indir)
print("please specify with -i")
sys.exit(1)
print("input directories:", indirlist)
if len(indirlist) > len(labellist):
for i in range(len(labellist), len(indirlist)):
labellist.append("dir" + str(i))
print("run labels:", labellist)
for idx in range(len(indirlist)):
indirlist[idx] = os.path.abspath(indirlist[idx])
if len(indirlist) > len(outdirlist):
for i in range(len(outdirlist), len(indirlist)):
outdirlist.append(os.path.abspath("dir" + str(i)))
for idx in range(len(outdirlist)):
outdirlist[idx] = os.path.abspath(outdirlist[idx])
print("data output directories:", outdirlist)
if shortrun:
print("short run")
print("output format: " + docformat)
print("device number: " + str(devicenum))
docdir = os.path.abspath(docdir)
print("document output in", docdir)
if not os.path.exists(docdir):
os.makedirs(docdir)
for outdir in outdirlist:
if not os.path.exists(outdir):
os.makedirs(outdir)
if not dryrun:
import getspecs
specs = "Host info:\n"
specs += "\thostname: " + getspecs.gethostname() + "\n"
specs += "\tcpu info: " + getspecs.getcpu() + "\n"
specs += "\tram: " + getspecs.getram() + "\n"
specs += "\tdistro: " + getspecs.getdistro() + "\n"
specs += "\tkernel version: " + getspecs.getkernel() + "\n"
specs += "\trocm version: " + getspecs.getrocmversion() + "\n"
specs += "Device info:\n"
specs += "\tdevice: " + getspecs.getdeviceinfo(devicenum) + "\n"
specs += "\tvbios version: " + getspecs.getvbios(devicenum) + "\n"
specs += "\tvram: " + getspecs.getvram(devicenum) + "\n"
specs += "\tperformance level: " + getspecs.getperflevel(devicenum) + "\n"
specs += "\tsystem clock: " + getspecs.getsclk(devicenum) + "\n"
specs += "\tmemory clock: " + getspecs.getmclk(devicenum) + "\n"
for outdir in outdirlist:
with open(os.path.join(outdir, "specs.txt"), "w+") as f:
f.write(specs)
with open(os.path.join(outdir, "gpuid.txt"), "w") as f:
f.write(getspecs.getgpuid(devicenum))
figs = []
if runtype == "benchmark":
figs = benchfigs(rundims, shortrun)
if runtype == "report":
figs = reportfigs(rundims, shortrun)
if runtype == "efficiency":
figs = efficiencyfigs(rundims, shortrun)
just1dc2crad2 = runtype == "efficiency"
for idx, fig in enumerate(figs):
for idx2, fig2 in enumerate(figs):
if idx != idx2 and fig.name == fig2.name:
print("figures have the same name!")
print(fig.name)
print(fig2.name)
sys.exit(1)
for fig in figs:
print(fig.name)
# Run the tests and put output in the outdirs:
for run in fig.runs:
if not dryrun:
run.executerun(nsample, indirlist, outdirlist, dloadexe)
# Compile the data in the outdirs into figures in docdir:
ncompare = len(indirlist) if speedup else 0
print(fig.labels(labellist))
#plotgflops = runtype == "submission" and not datatype == "gflops"
print(fig.asycmd(docdir, outdirlist, labellist, docformat, datatype, ncompare, secondtype, just1dc2crad2))
fig.executeasy(docdir, outdirlist, labellist, docformat, datatype, ncompare, secondtype, just1dc2crad2)
# Make the document in docdir:
if docformat == "pdf":
maketex(figs, docdir, outdirlist, labellist, nsample, secondtype)
if docformat == "docx":
makedocx(figs, docdir, nsample, secondtype)
print("Finished! Output in " + docdir)
def binaryisok(dirname, progname):
prog = os.path.join(dirname, progname)
return os.path.isfile(prog)
gflopstext = '''\
GFLOP/s are computed based on the Cooley--Tukey operation count \
for a radix-2 transform, and half that in the case of \
real-complex transforms. The rocFFT operation count may differ from \
this value: GFLOP/s is provided for the sake of comparison only.'''
# Function for generating a tex document in PDF format.
def maketex(figs, docdir, outdirlist, labellist, nsample, secondtype):
    header = '''\\documentclass[12pt]{article}
\\usepackage{graphicx}
\\usepackage{url}
\\author{<NAME>}
\\begin{document}
'''
texstring = header
texstring += "\n\\section{Introduction}\n"
texstring += "Each data point represents the median of " + str(nsample) + " values, with error bars showing the 95\\% confidence interval for the median. All transforms are double-precision.\n\n"
if secondtype == "gflops":
texstring += gflopstext + "\n\n"
texstring += "\\vspace{1cm}\n"
# texstring += "\\begin{tabular}{ll}"
# texstring += labelA +" &\\url{"+ dirA+"} \\\\\n"
# if not dirB == None:
# texstring += labelB +" &\\url{"+ dirB+"} \\\\\n"
# texstring += "\\end{tabular}\n\n"
# texstring += "\\vspace{1cm}\n"
texstring += "\n\\section{Device Specification}\n"
for idx in range(len(outdirlist)):
texstring += "\n\\subsection{" + labellist[idx] + "}\n"
specfilename = os.path.join(outdirlist[idx], "specs.txt")
if os.path.isfile(specfilename):
specs = ""
with open(specfilename, "r") as f:
specs = f.read()
for line in specs.split("\n"):
if line.startswith("Host info"):
texstring += "\\noindent " + line
texstring += "\\begin{itemize}\n"
elif line.startswith("Device info"):
texstring += "\\end{itemize}\n"
texstring += line
texstring += "\\begin{itemize}\n"
else:
if line.strip() != "":
texstring += "\\item " + line + "\n"
texstring += "\\end{itemize}\n"
texstring += "\n"
texstring += "\\clearpage\n"
texstring += "\n\\section{Figures}\n"
for idx, fig in enumerate(figs):
print(fig.filename(docdir, "pdf"))
print(fig.caption)
texstring += '''
\\centering
\\begin{figure}[htbp]
\\includegraphics[width=\\textwidth]{'''
texstring += fig.filename("", "pdf")
texstring += '''}
\\caption{''' + fig.caption + '''}
\\end{figure}
'''
if (idx % 2) == 0:
texstring += "\\clearpage\n"
texstring += "\n\\end{document}\n"
fname = os.path.join(docdir, 'figs.tex')
with open(fname, 'w') as outfile:
outfile.write(texstring)
fout = open(os.path.join(docdir, "texcmd.log"), 'w+')
ferr = open(os.path.join(docdir, "texcmd.err"), 'w+')
latexcmd = ["latexmk", "-pdf", 'figs.tex']
print(" ".join(latexcmd))
texproc = subprocess.Popen(latexcmd, cwd=docdir, stdout=fout, stderr=ferr,
env=os.environ.copy())
texproc.wait()
fout.close()
ferr.close()
texrc = texproc.returncode
if texrc != 0:
print("****tex fail****")
# Convert a PDF to an EMF using pdf2svg and inkscape.
def pdf2emf(pdfname):
svgname = pdfname.replace(".pdf",".svg")
cmd_pdf2svg = ["pdf2svg", pdfname, svgname]
proc = subprocess.Popen(cmd_pdf2svg, env=os.environ.copy())
proc.wait()
if proc.returncode != 0:
print("pdf2svg failed!")
sys.exit(1)
emfname = pdfname.replace(".pdf",".emf")
cmd_svg2emf = ["inkscape", svgname, "-M", emfname]
proc = subprocess.Popen(cmd_svg2emf, env=os.environ.copy())
proc.wait()
if proc.returncode != 0:
print("svg2emf failed!")
sys.exit(1)
return emfname
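# pdf2emf assumes both pdf2svg and inkscape are available on PATH; the -M flag
# used above is the EMF-export option of the Inkscape 0.9x command line and may
# need adjusting (e.g. to --export-filename) for Inkscape 1.x.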
# Function for generating a docx using emf files and the docx package.
def makedocx(figs, outdir, nsample, secondtype):
import docx
document = docx.Document()
document.add_heading('rocFFT benchmarks', 0)
document.add_paragraph("Each data point represents the median of " + str(nsample) + " values, with error bars showing the 95% confidence interval for the median. Transforms are double-precision, forward, and in-place.")
if secondtype == "gflops":
document.add_paragraph(gflopstext)
specfilename = os.path.join(outdir, "specs.txt")
if os.path.isfile(specfilename):
with open(specfilename, "r") as f:
specs = f.read()
for line in specs.split("\n"):
document.add_paragraph(line)
for fig in figs:
print(fig.filename(outdir, "docx"))
print(fig.caption)
emfname = pdf2emf(fig.filename(outdir, "docx"))
document.add_picture(emfname, width=docx.shared.Inches(6))
document.add_paragraph(fig.caption)
document.save(os.path.join(outdir,'figs.docx'))
if __name__ == "__main__":
main(sys.argv[1:])
| #!/usr/bin/python3
import sys, getopt
import numpy as np
from math import *
import subprocess
import os
import re # regexp package
import shutil
import tempfile
usage = '''A timing script for rocFFT that generates a lot of data
Usage:
\talltime.py
\t\t-b Specify binary for dload executable (optional)
\t\t-i Append to list of binary directories (appendable)
\t\t-o Specify output directories for raw data
\t\t appendable; defaults to "dir0", "dir1", etc.
\t\t-l Specify labels for runs
\t\t appendable; defaults to "dir0", "dir1", etc.
\t\t-w output directory for graphs and final document
\t\t-S plot speedup (default: 1, disabled: 0)
\t\t-t data type: time (default) or gflops or roofline
\t\t-y secondary axis type: none or gflops
\t\t-s short run
\t\t-T do not perform FFTs; just generate document
\t\t-f document format: pdf (default) or docx
\t\t-g generate graphs via Asymptote: 0(default) or 1
\t\t-d device number (default: 0)
\t\t-N Number of samples (default: 10)
\t\t-D dims to test. default: 1,2,3
\t\t-R runtype: report, benchmark, or efficiency
'''
def nextpow(val, radix):
x = 1
while(x <= val):
x *= radix
return x
# A class for generating data for figures.
class rundata:
def __init__(self, label,
dimension, minsize, maxsize, nbatch, radix, ratio, ffttype,
direction, inplace):
self.dimension = dimension
self.minsize = minsize
self.maxsize = maxsize
self.nbatch = nbatch
self.radix = radix
self.ratio = ratio
self.ffttype = ffttype
self.precision = "double"
self.inplace = inplace
self.direction = direction
self.label = label
def outfilename(self, odir):
outfile = ""
outfile += "radix" + str(self.radix)
outfile += "_dim" + str(self.dimension)
outfile += "_" + self.precision
outfile += "_n" + str(self.nbatch)
if self.direction == 1:
outfile += "_inv"
if self.dimension > 1:
outfile += "_ratio" + "_" + str(self.ratio[0])
if self.dimension > 2:
outfile += "_" + str(self.ratio[1])
outfile += "_" + self.ffttype
if self.inplace:
outfile += "_inplace"
else:
outfile += "_outofplace"
outfile += ".dat"
outfile = os.path.join(odir, outfile)
return outfile
def runcmd(self, nsample, indirlist, outdirlist, dloadexe):
cmd = [os.path.join(sys.path[0],"timing.py")]
if dloadexe == None:
# When not using dload, we just have one input and output dir.
cmd.append("-w")
cmd.append(os.path.abspath(indirlist[0]))
cmd.append("-o")
cmd.append(self.outfilename(outdirlist[0]))
else:
cmd.append("-w")
cmd.append(dloadexe)
for indir in indirlist:
cmd.append("-i")
cmd.append(indir)
for outdir in outdirlist:
cmd.append("-o")
cmd.append(self.outfilename(outdir))
cmd.append("-N")
cmd.append(str(nsample))
cmd.append("-b")
cmd.append(str(self.nbatch))
cmd.append("-x")
cmd.append(str(self.minsize))
cmd.append("-X")
cmd.append(str(self.maxsize))
if self.dimension > 1:
cmd.append("-y")
cmd.append(str(self.minsize * self.ratio[0]))
cmd.append("-Y")
cmd.append(str(self.maxsize * self.ratio[0]))
if self.dimension > 2:
cmd.append("-z")
cmd.append(str(self.minsize * self.ratio[1]))
cmd.append("-Z")
cmd.append(str(self.maxsize * self.ratio[1]))
cmd.append("-r")
cmd.append(str(self.radix))
cmd.append("-D")
cmd.append(str(self.direction))
cmd.append("-d")
cmd.append(str(self.dimension))
cmd.append("-f")
cmd.append(self.precision)
if self.ffttype == "r2c":
cmd.append("-R")
return cmd
def executerun(self, nsample, indirlist, outdirlist, dloadexe):
fout = tempfile.TemporaryFile(mode="w+")
ferr = tempfile.TemporaryFile(mode="w+")
if dloadexe != None:
cmd = self.runcmd(nsample, indirlist, outdirlist, dloadexe)
print(" ".join(cmd))
proc = subprocess.Popen(cmd,
stdout=fout, stderr=ferr,
env=os.environ.copy())
# FIXME: copy log to multiple outputs?
proc.wait()
rc = proc.returncode
if rc != 0:
print("****fail****")
else:
for idx in range(min(len(indirlist), len(outdirlist))):
print(idx, ":", indirlist[idx], "->", outdirlist[idx], flush=True)
cmd = self.runcmd(nsample, [indirlist[idx]], [outdirlist[idx]], None)
print(" ".join(cmd))
proc = subprocess.Popen(cmd,
stdout=fout, stderr=ferr,
env=os.environ.copy())
proc.wait()
rc = proc.returncode
if rc != 0:
print("****fail****")
return 0
# Figure class, which contains runs and provides commands to generate figures.
class figure:
def __init__(self, name, caption):
self.name = name
self.runs = []
self.caption = caption
def inputfiles(self, outdirlist):
import os
files = []
for run in self.runs:
for outdir in outdirlist:
files.append(run.outfilename(outdir))
print(files)
return files
def labels(self, labellist):
labels = []
for run in self.runs:
for label in labellist:
labels.append(label + run.label)
return labels
def filename(self, outdir, docformat):
outfigure = self.name
outfigure += ".pdf"
# if docformat == "pdf":
# outfigure += ".pdf"
# if docformat == "docx":
# outfigure += ".png"
return os.path.join(outdir, outfigure)
def asycmd(self, docdir, outdirlist, labellist, docformat, datatype, ncompare, secondtype, just1dc2crad2):
asycmd = ["asy"]
asycmd.append("-f")
asycmd.append("pdf")
# if docformat == "pdf":
# asycmd.append("-f")
# asycmd.append("pdf")
# if docformat == "docx":
# asycmd.append("-f")
# asycmd.append("png")
# asycmd.append("-render")
# asycmd.append("8")
asycmd.append(os.path.join(sys.path[0],"datagraphs.asy"))
asycmd.append("-u")
inputfiles = self.inputfiles(outdirlist)
asycmd.append('filenames="' + ",".join(inputfiles) + '"')
asycmd.append("-u")
labels = self.labels(labellist)
asycmd.append('legendlist="' + ",".join(labels) + '"')
asycmd.append("-u")
asycmd.append('speedup=' + str(ncompare))
if just1dc2crad2 :
asycmd.append("-u")
asycmd.append('just1dc2crad2=true')
if secondtype == "gflops":
asycmd.append("-u")
asycmd.append('secondarygflops=true')
if datatype == "gflops":
asycmd.append("-u")
asycmd.append('primaryaxis="gflops"')
if datatype == "roofline":
asycmd.append("-u")
asycmd.append('primaryaxis="roofline"')
# roofline on multiple devices doesn't really make sense; just use the first device
with open(os.path.join(outdirlist[0], "gpuid.txt"), "r") as f:
gpuid = f.read()
asycmd.append("-u")
asycmd.append('gpuid="' + gpuid.strip() + '"')
if len(self.runs) > 0:
asycmd.append("-u")
asycmd.append('batchsize=' + str(self.runs[0].nbatch))
asycmd.append("-u")
asycmd.append('problemdim=' + str(self.runs[0].dimension))
asycmd.append("-u")
val = 1
for rat in self.runs[0].ratio:
val *= rat
asycmd.append('problemratio=' + str(val))
asycmd.append("-u")
if self.runs[0].ffttype == "r2c":
asycmd.append("realcomplex=true")
else:
asycmd.append("realcomplex=false")
asycmd.append("-o")
asycmd.append(self.filename(docdir, docformat) )
return asycmd
def executeasy(self, docdir, outdirs, labellist, docformat, datatype, ncompare, secondtype,
just1dc2crad2):
fout = tempfile.TemporaryFile(mode="w+")
ferr = tempfile.TemporaryFile(mode="w+")
asyproc = subprocess.Popen(self.asycmd(docdir, outdirs, labellist,
docformat, datatype, ncompare, secondtype,
just1dc2crad2),
stdout=fout, stderr=ferr, env=os.environ.copy(),
cwd = sys.path[0])
asyproc.wait()
asyrc = asyproc.returncode
if asyrc != 0:
print("****asy fail****")
fout.seek(0)
cout = fout.read()
print(cout)
ferr.seek(0)
cerr = ferr.read()
print(cerr)
return asyrc
# Function for generating figures for benchmark output
def benchfigs(rundims, shortrun):
figs = []
# FFT directions
forwards = -1
backwards = 1
if 1 in rundims:
dimension = 1
nbatch = 1
min1d = 256 if shortrun else 1024
max1d = 4000 if shortrun else 536870912
for inplace in [True, False]:
fig = figure("1d_c2c" + ("inplace" if inplace else "outofplace"),
"1D complex transforms " + ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "c2c", forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("1d_r2c" + ("inplace" if inplace else "outofplace")
, "1D real-to-complex transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "r2c", forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("1d_c2r" + ("inplace" if inplace else "outofplace"),
"1D complex-to-real transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix) ,
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "r2c", backwards, inplace) )
figs.append(fig)
if 2 in rundims:
dimension = 2
nbatch = 1
min2d = 64 if shortrun else 128
max2d = 8192 if shortrun else 32768
for inplace in [True, False]:
fig = figure("2d_c2c" + ("inplace" if inplace else "outofplace"),
"2D complex transforms " + ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min2d, radix), max2d, nbatch, radix, [1],
"c2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("2d_r2c" + ("inplace" if inplace else "outofplace"),
"2D real-to-complex transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min2d, radix), max2d, nbatch, radix, [1],
"r2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("2d_c2r" + ("inplace" if inplace else "outofplace"),
"2D complex-to-real transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min2d, radix), max2d, nbatch, radix, [1],
"r2c",
backwards, inplace) )
figs.append(fig)
if 3 in rundims:
dimension = 3
min3d = 16
max3d = 128 if shortrun else 1024
nbatch = 1
for inplace in [True]:
fig = figure("3d_c2c" + ("inplace" if inplace else "outofplace"),
"3D complex transforms " + ("in-place" if inplace else "out-of-place"))
for radix in [2, 3, 5]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min3d, radix), max3d, nbatch, radix, [1,1],
"c2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("3d_r2c" + ("inplace" if inplace else "outofplace")
, "3D real-to-complex transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min3d, radix), max3d, nbatch, radix, [1,1],
"r2c",
forwards, inplace) )
figs.append(fig)
for inplace in [True, False]:
fig = figure("3d_c2r" + ("inplace" if inplace else "outofplace"),
"3D complex-to-real transforms " \
+ ("in-place" if inplace else "out-of-place"))
for radix in [2, 3]:
fig.runs.append( rundata("radix " + str(radix), dimension,
nextpow(min3d, radix), max3d, nbatch, radix, [1,1],
"r2c",
backwards, inplace) )
figs.append(fig)
return figs
def efficiencyfigs(rundims, shortrun):
figs = []
# FFT directions
forwards = -1
backwards = 1
inplace = True
dimension = 1
radix = 2
min1d = 1024
max1d = 1048576 if shortrun else 268435456 #pow(2,28) gives a floating type :(
nbatch = 1
while max1d > min1d:
fig = figure("1d_c2c_batch" + str(nbatch) + "_radix" + str(radix),
"1D complex transforms " + ("in-place" if inplace else "out-of-place") + " radix " + str(radix) + " batch " + str(nbatch) )
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix), max1d, nbatch,
radix, [], "c2c", forwards, inplace) )
figs.append(fig)
nbatch *= 2
max1d //= 2
min1d //= 2
        min1d = max(min1d, 2**5)  # ** is exponentiation; ^ would be bitwise XOR
return figs
# Function for generating figures for a performance report
def reportfigs(rundims, shortrun):
figs = []
# FFT directions
forwards = -1
backwards = 1
inplace = True
if 1 in rundims:
dimension = 1
for min1d, max1d, nbatch in [[1024,536870912,1], [8,32768,100000]]:
for radix in [2, 3, 5, 7]:
fig = figure("1d_c2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"1D complex transforms with radix " + str(radix)\
+ " and batch size " + str(nbatch) + "." )
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix),
max1d, nbatch,
radix, [], "c2c", forwards,
inplace) )
figs.append(fig)
for radix in [2, 3, 5, 7]:
fig = figure("1d_r2c"\
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"1D real-to-complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix),
max1d, nbatch,
radix, [], "r2c", forwards,
inplace) )
figs.append(fig)
for radix in [2, 3, 5, 7]:
fig = figure("1d_c2r" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"1D complex-to-real transforms with radix " \
+ str(radix) \
+ " and batch size " + str(nbatch) + "." )
fig.runs.append( rundata("radix " + str(radix),
dimension, nextpow(min1d, radix),
max1d, nbatch,
radix, [], "r2c", backwards,
inplace) )
figs.append(fig)
if 2 in rundims:
dimension = 2
for min2d, max2d, nbatch in [[128,32768,1], [64,8192,100]]:
for radix in [2, 3, 5]:
fig = figure("2d_c2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch) ,
"2D complex transforms with radix " + str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata( "radix "+ str(radix),
dimension,
nextpow(min2d, radix), max2d,
nbatch,
radix, [1], "c2c",
forwards, inplace) )
figs.append(fig)
for radix in [2, 3, 5]:
fig = figure("2d_r2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D real-to-complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata( "radix " + str(radix),
dimension,
nextpow(min2d, radix), max2d,
nbatch,
radix, [1], "r2c",
forwards, inplace) )
figs.append(fig)
for radix in [2, 3, 5]:
fig = figure("2d_c2r" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D complex-to-real transforms with radix "\
+ str(radix) +\
" and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min2d, radix), max2d,
nbatch,
radix, [1], "r2c",
backwards, inplace) )
figs.append(fig)
for radix in [2]:
fig = figure("2d_c2c_r2" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D complex transforms "\
+ "with aspect ratio N:2N with radix "\
+ str(radix) + " and batch size " + str(nbatch) \
+ ".")
fig.runs.append( rundata( "radix 2",
dimension, min2d, max2d, nbatch, 2,
[2], "c2c",
forwards, inplace) )
figs.append(fig)
for radix in [2]:
fig = figure("2d_r2c_r2" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"2D real-to-complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix 2",
dimension, min2d, max2d, nbatch, 2,
[2], "r2c",
forwards, inplace) )
figs.append(fig)
if 3 in rundims:
dimension = 3
for min3d, max3d, nbatch in [[16,128,1],[4,64,100]]:
for radix in [2, 3, 5]:
fig = figure("3d_c2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D complex transforms with radix "\
+ str(radix) \
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min3d, radix), max3d,
nbatch,
radix, [1,1], "c2c",
forwards, inplace) )
figs.append(fig)
for radix in [2, 3]:
fig = figure("3d_r2c" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D real-to-complex transforms with radix "\
+ str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min3d, radix), max3d,
nbatch,
radix, [1,1], "r2c",
forwards, inplace) )
figs.append(fig)
fig = figure("3d_c2r" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D complex-to-real transforms with radix "\
+ str(radix)
+ " and batch size " + str(nbatch) + ".")
for radix in [2]:
fig.runs.append( rundata("radix " + str(radix),
dimension,
nextpow(min3d, radix), max3d,
nbatch,
radix, [1,1], "r2c",
backwards, inplace) )
figs.append(fig)
fig = figure("3d_c2c_aspect" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D complex transforms "\
+ "with aspect ratio N:N:16N with radix "\
+ str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix 2",
dimension, min3d, max3d, nbatch, 2,
[1,16], "c2c",
forwards, inplace) )
figs.append(fig)
fig = figure("3d_r2c_aspect" \
+ "_radix" + str(radix) \
+ "_batch" + str(nbatch),
"3D real-to-complex transforms " \
+ "with aspect ratio N:N:16N with radix " \
+ str(radix)\
+ " and batch size " + str(nbatch) + ".")
fig.runs.append( rundata("radix 2",
dimension, min3d, max3d, nbatch, 2,
[1,16], "r2c",
forwards, inplace) )
figs.append(fig)
return figs
def main(argv):
dloadexe = None
indirlist = []
outdirlist = []
labellist = []
docdir = "doc"
dryrun = False
nbatch = 1
speedup = True
datatype = "time"
shortrun = False
docformat = "pdf"
devicenum = 0
doAsy = True
nsample = 10
rundims = [1,2,3]
runtype = "benchmark"
secondtype = "none"
try:
opts, args = getopt.getopt(argv,"hb:D:f:Tt:i:o:l:S:sg:d:N:R:w:y:")
except getopt.GetoptError:
print("error in parsing arguments.")
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h"):
print(usage)
exit(0)
elif opt in ("-b"):
dloadexe = os.path.abspath(arg)
elif opt in ("-i"):
indirlist.append(arg)
elif opt in ("-i"):
indirlist.append(arg)
elif opt in ("-o"):
outdirlist.append(arg)
elif opt in ("-l"):
labellist.append(arg)
elif opt in ("-w"):
docdir = arg
elif opt in ("-T"):
dryrun = True
elif opt in ("-s"):
shortrun = True
elif opt in ("-g"):
if int(arg) == 0:
doAsy = False
if int(arg) == 1:
doAsy = True
elif opt in ("-d"):
devicenum = int(arg)
elif opt in ("-D"):
rundims = []
for val in arg.split(','):
rundims.append(int(val))
elif opt in ("-N"):
nsample = int(arg)
elif opt in ("-S"):
if int(arg) == 0:
speedup = False
if int(arg) == 1:
speedup = True
elif opt in ("-t"):
if arg not in ["time", "gflops", "roofline"]:
print("data type must be time or gflops or roofline")
print(usage)
sys.exit(1)
datatype = arg
elif opt in ("-y"):
if arg not in ["none", "gflops"]:
print("data type must be gflops or none")
print(usage)
sys.exit(1)
secondtype = arg
elif opt in ("-R"):
if arg not in ["report", "benchmark", "efficiency"]:
print("data type must be gflops or none")
print(usage)
sys.exit(1)
runtype = arg
if runtype == "efficiency":
datatype = "roofline"
elif opt in ("-f"):
goodvals = ["pdf", "docx"]
if arg not in goodvals:
print("error: format must in " + " ".join(goodvals))
print(usage)
sys.exit(1)
docformat = arg
print("rundims:")
print(rundims)
if not dryrun:
if dloadexe == None:
for indir in indirlist:
if not binaryisok(indir, "rocfft-rider"):
print("unable to find " + "rocfft-rider" + " in " + indir)
print("please specify with -i")
sys.exit(1)
else:
if not binaryisok(dloadexe, "dyna-rocfft-rider"):
print("unable to find " + "dyna-rocfft-rider" + " in " + dloadexe)
for indir in indirlist:
if not binaryisok(indir, "librocfft.so"):
print("unable to find " + "librocfft.so" + " in " + indir)
print("please specify with -i")
sys.exit(1)
print("input directories:", indirlist)
if len(indirlist) > len(labellist):
for i in range(len(labellist), len(indirlist)):
labellist.append("dir" + str(i))
print("run labels:", labellist)
for idx in range(len(indirlist)):
indirlist[idx] = os.path.abspath(indirlist[idx])
if len(indirlist) > len(outdirlist):
for i in range(len(outdirlist), len(indirlist)):
outdirlist.append(os.path.abspath("dir" + str(i)))
for idx in range(len(outdirlist)):
outdirlist[idx] = os.path.abspath(outdirlist[idx])
print("data output directories:", outdirlist)
if shortrun:
print("short run")
print("output format: " + docformat)
print("device number: " + str(devicenum))
docdir = os.path.abspath(docdir)
print("document output in", docdir)
if not os.path.exists(docdir):
os.makedirs(docdir)
for outdir in outdirlist:
if not os.path.exists(outdir):
os.makedirs(outdir)
if not dryrun:
import getspecs
specs = "Host info:\n"
specs += "\thostname: " + getspecs.gethostname() + "\n"
specs += "\tcpu info: " + getspecs.getcpu() + "\n"
specs += "\tram: " + getspecs.getram() + "\n"
specs += "\tdistro: " + getspecs.getdistro() + "\n"
specs += "\tkernel version: " + getspecs.getkernel() + "\n"
specs += "\trocm version: " + getspecs.getrocmversion() + "\n"
specs += "Device info:\n"
specs += "\tdevice: " + getspecs.getdeviceinfo(devicenum) + "\n"
specs += "\tvbios version: " + getspecs.getvbios(devicenum) + "\n"
specs += "\tvram: " + getspecs.getvram(devicenum) + "\n"
specs += "\tperformance level: " + getspecs.getperflevel(devicenum) + "\n"
specs += "\tsystem clock: " + getspecs.getsclk(devicenum) + "\n"
specs += "\tmemory clock: " + getspecs.getmclk(devicenum) + "\n"
for outdir in outdirlist:
with open(os.path.join(outdir, "specs.txt"), "w+") as f:
f.write(specs)
with open(os.path.join(outdir, "gpuid.txt"), "w") as f:
f.write(getspecs.getgpuid(devicenum))
figs = []
if runtype == "benchmark":
figs = benchfigs(rundims, shortrun)
if runtype == "report":
figs = reportfigs(rundims, shortrun)
if runtype == "efficiency":
figs = efficiencyfigs(rundims, shortrun)
just1dc2crad2 = runtype == "efficiency"
for idx, fig in enumerate(figs):
for idx2, fig2 in enumerate(figs):
if idx != idx2 and fig.name == fig2.name:
print("figures have the same name!")
print(fig.name)
print(fig2.name)
sys.exit(1)
for fig in figs:
print(fig.name)
# Run the tests and put output in the outdirs:
for run in fig.runs:
if not dryrun:
run.executerun(nsample, indirlist, outdirlist, dloadexe)
# Compile the data in the outdirs into figures in docdir:
ncompare = len(indirlist) if speedup else 0
print(fig.labels(labellist))
#plotgflops = runtype == "submission" and not datatype == "gflops"
print(fig.asycmd(docdir, outdirlist, labellist, docformat, datatype, ncompare, secondtype, just1dc2crad2))
fig.executeasy(docdir, outdirlist, labellist, docformat, datatype, ncompare, secondtype, just1dc2crad2)
# Make the document in docdir:
if docformat == "pdf":
maketex(figs, docdir, outdirlist, labellist, nsample, secondtype)
if docformat == "docx":
makedocx(figs, docdir, nsample, secondtype)
print("Finished! Output in " + docdir)
def binaryisok(dirname, progname):
prog = os.path.join(dirname, progname)
return os.path.isfile(prog)
gflopstext = '''\
GFLOP/s are computed based on the Cooley--Tukey operation count \
for a radix-2 transform, and half that in the case of \
real-complex transforms. The rocFFT operation count may differ from \
this value: GFLOP/s is provided for the sake of comparison only.'''
# Function for generating a tex document in PDF format.
def maketex(figs, docdir, outdirlist, labellist, nsample, secondtype):
    header = '''\\documentclass[12pt]{article}
\\usepackage{graphicx}
\\usepackage{url}
\\author{<NAME>}
\\begin{document}
'''
texstring = header
texstring += "\n\\section{Introduction}\n"
texstring += "Each data point represents the median of " + str(nsample) + " values, with error bars showing the 95\\% confidence interval for the median. All transforms are double-precision.\n\n"
if secondtype == "gflops":
texstring += gflopstext + "\n\n"
texstring += "\\vspace{1cm}\n"
# texstring += "\\begin{tabular}{ll}"
# texstring += labelA +" &\\url{"+ dirA+"} \\\\\n"
# if not dirB == None:
# texstring += labelB +" &\\url{"+ dirB+"} \\\\\n"
# texstring += "\\end{tabular}\n\n"
# texstring += "\\vspace{1cm}\n"
texstring += "\n\\section{Device Specification}\n"
for idx in range(len(outdirlist)):
texstring += "\n\\subsection{" + labellist[idx] + "}\n"
specfilename = os.path.join(outdirlist[idx], "specs.txt")
if os.path.isfile(specfilename):
specs = ""
with open(specfilename, "r") as f:
specs = f.read()
for line in specs.split("\n"):
if line.startswith("Host info"):
texstring += "\\noindent " + line
texstring += "\\begin{itemize}\n"
elif line.startswith("Device info"):
texstring += "\\end{itemize}\n"
texstring += line
texstring += "\\begin{itemize}\n"
else:
if line.strip() != "":
texstring += "\\item " + line + "\n"
texstring += "\\end{itemize}\n"
texstring += "\n"
texstring += "\\clearpage\n"
texstring += "\n\\section{Figures}\n"
for idx, fig in enumerate(figs):
print(fig.filename(docdir, "pdf"))
print(fig.caption)
texstring += '''
\\centering
\\begin{figure}[htbp]
\\includegraphics[width=\\textwidth]{'''
texstring += fig.filename("", "pdf")
texstring += '''}
\\caption{''' + fig.caption + '''}
\\end{figure}
'''
if (idx % 2) == 0:
texstring += "\\clearpage\n"
texstring += "\n\\end{document}\n"
fname = os.path.join(docdir, 'figs.tex')
with open(fname, 'w') as outfile:
outfile.write(texstring)
fout = open(os.path.join(docdir, "texcmd.log"), 'w+')
ferr = open(os.path.join(docdir, "texcmd.err"), 'w+')
latexcmd = ["latexmk", "-pdf", 'figs.tex']
print(" ".join(latexcmd))
texproc = subprocess.Popen(latexcmd, cwd=docdir, stdout=fout, stderr=ferr,
env=os.environ.copy())
texproc.wait()
fout.close()
ferr.close()
texrc = texproc.returncode
if texrc != 0:
print("****tex fail****")
# Convert a PDF to an EMF using pdf2svg and inkscape.
def pdf2emf(pdfname):
svgname = pdfname.replace(".pdf",".svg")
cmd_pdf2svg = ["pdf2svg", pdfname, svgname]
proc = subprocess.Popen(cmd_pdf2svg, env=os.environ.copy())
proc.wait()
if proc.returncode != 0:
print("pdf2svg failed!")
sys.exit(1)
emfname = pdfname.replace(".pdf",".emf")
cmd_svg2emf = ["inkscape", svgname, "-M", emfname]
proc = subprocess.Popen(cmd_svg2emf, env=os.environ.copy())
proc.wait()
if proc.returncode != 0:
print("svg2emf failed!")
sys.exit(1)
return emfname
# Function for generating a docx using emf files and the docx package.
def makedocx(figs, outdir, nsample, secondtype):
import docx
document = docx.Document()
document.add_heading('rocFFT benchmarks', 0)
document.add_paragraph("Each data point represents the median of " + str(nsample) + " values, with error bars showing the 95% confidence interval for the median. Transforms are double-precision, forward, and in-place.")
if secondtype == "gflops":
document.add_paragraph(gflopstext)
specfilename = os.path.join(outdir, "specs.txt")
if os.path.isfile(specfilename):
with open(specfilename, "r") as f:
specs = f.read()
for line in specs.split("\n"):
document.add_paragraph(line)
for fig in figs:
print(fig.filename(outdir, "docx"))
print(fig.caption)
emfname = pdf2emf(fig.filename(outdir, "docx"))
document.add_picture(emfname, width=docx.shared.Inches(6))
document.add_paragraph(fig.caption)
document.save(os.path.join(outdir,'figs.docx'))
if __name__ == "__main__":
main(sys.argv[1:])
| en | 0.578256 | #!/usr/bin/python3 # regexp package A timing script rocfft that generates a lot of data Usage: \talltime.py \t\t-b Specify binary for dload executable (optional) \t\t-i Append to list of binary directories (appendable) \t\t-o Specify output directories for raw data \t\t appendable; defaults to "dir0", "dir1", etc. \t\t-l Specify labels for runs \t\t appendable; defaults to "dir0", "dir1", etc. \t\t-w output directory for graphs and final document \t\t-S plot speedup (default: 1, disabled: 0) \t\t-t data type: time (default) or gflops or roofline \t\t-y secondary acix type: none or gflops \t\t-s short run \t\t-T do not perform FFTs; just generate document \t\t-f document format: pdf (default) or docx \t\t-g generate graphs via Asymptote: 0(default) or 1 \t\t-d device number (default: 0) \t\t-N Number of samples (default: 10) \t\t-D dims to test. default: 1,2,3 \t\t-R runtype: report benchmark or efficiency # A class for generating data for figures. # When not using dload, we just have one input and output dir. # FIXME: copy log to multiple outputs? # Figure class, which contains runs and provides commands to generate figures. # if docformat == "pdf": # outfigure += ".pdf" # if docformat == "docx": # outfigure += ".png" # if docformat == "pdf": # asycmd.append("-f") # asycmd.append("pdf") # if docformat == "docx": # asycmd.append("-f") # asycmd.append("png") # asycmd.append("-render") # asycmd.append("8") # roofline on multiple devices doesn't really make sense; just use the first device # Function for generating figures for benchmark output # FFT directions # FFT directions #pow(2,28) gives a floating type :( # Function for generating figures for a performance report # FFT directions # Run the tests and put output in the outdirs: # Compile the data in the outdirs into figures in docdir: #plotgflops = runtype == "submission" and not datatype == "gflops" # Make the document in docdir: \ GFLOP/s are computed based on the Cooley--Tukey operation count \ for a radix-2 transform, and half that for in the case of \ real-complex transforms. The rocFFT operation count may differ from \ this value: GFLOP/s is provided for the sake of comparison only. # Function for generating a tex document in PDF format. \documentclass[12pt]{article} \\usepackage{graphicx} \\usepackage{url} \\author{<NAME>} \\begin{document} # texstring += "\\begin{tabular}{ll}" # texstring += labelA +" &\\url{"+ dirA+"} \\\\\n" # if not dirB == None: # texstring += labelB +" &\\url{"+ dirB+"} \\\\\n" # texstring += "\\end{tabular}\n\n" # texstring += "\\vspace{1cm}\n" \\centering \\begin{figure}[htbp] \\includegraphics[width=\\textwidth]{ } \\caption{ } \\end{figure} # Confert a PDF to an EMF using pdf2svg and inkscape. # Function for generating a docx using emf files and the docx package. | 2.298219 | 2 |
zerver/migrations/0182_set_initial_value_is_private_flag.py | yuroitaki/zulip | 4 | 6630428 | <reponame>yuroitaki/zulip<filename>zerver/migrations/0182_set_initial_value_is_private_flag.py<gh_stars>1-10
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import F
def set_initial_value_of_is_private_flag(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
UserMessage = apps.get_model("zerver", "UserMessage")
Message = apps.get_model("zerver", "Message")
if not Message.objects.exists():
return
i = 0
# Total is only used for the progress bar
total = Message.objects.filter(recipient__type__in=[1, 3]).count()
processed = 0
print("\nStart setting initial value for is_private flag...", flush=True)
while True:
range_end = i + 10000
# Can't use [Recipient.PERSONAL, Recipient.HUDDLE] in migration files
message_ids = list(
Message.objects.filter(recipient__type__in=[1, 3], id__gt=i, id__lte=range_end)
.values_list("id", flat=True)
.order_by("id")
)
count = UserMessage.objects.filter(message_id__in=message_ids).update(
flags=F("flags").bitor(UserMessage.flags.is_private)
)
if count == 0 and range_end >= Message.objects.last().id:
break
i = range_end
processed += len(message_ids)
if total != 0:
percent = round((processed / total) * 100, 2)
else:
percent = 100.00
print(f"Processed {processed}/{total} {percent}%", flush=True)
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0181_userprofile_change_emojiset"),
]
operations = [
migrations.RunPython(
set_initial_value_of_is_private_flag,
reverse_code=migrations.RunPython.noop,
elidable=True,
),
]
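# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original migration): the loop above
# walks the Message id space in fixed-size ranges and ORs one flag bit into
# every matching UserMessage row via F("flags").bitor(...).  The standalone
# snippet below mirrors that id-range batching with plain dictionaries so the
# control flow can be exercised without Django; the bit position, batch size
# and row counts are assumptions made only for this example.
def _batched_flag_update_sketch():
    IS_PRIVATE = 1 << 11                 # assumed bit position, illustration only
    rows = {row_id: 0 for row_id in range(1, 25001)}   # fake table: id -> flags
    batch_size = 10000
    i = 0
    last_id = max(rows)
    while True:
        range_end = i + batch_size
        batch_ids = [row_id for row_id in rows if i < row_id <= range_end]
        for row_id in batch_ids:
            rows[row_id] |= IS_PRIVATE   # same effect as the bitor() update above
        if not batch_ids and range_end >= last_id:
            break
        i = range_end
    return sum(1 for flags in rows.values() if flags & IS_PRIVATE)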
| from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import F
def set_initial_value_of_is_private_flag(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
UserMessage = apps.get_model("zerver", "UserMessage")
Message = apps.get_model("zerver", "Message")
if not Message.objects.exists():
return
i = 0
# Total is only used for the progress bar
total = Message.objects.filter(recipient__type__in=[1, 3]).count()
processed = 0
print("\nStart setting initial value for is_private flag...", flush=True)
while True:
range_end = i + 10000
# Can't use [Recipient.PERSONAL, Recipient.HUDDLE] in migration files
message_ids = list(
Message.objects.filter(recipient__type__in=[1, 3], id__gt=i, id__lte=range_end)
.values_list("id", flat=True)
.order_by("id")
)
count = UserMessage.objects.filter(message_id__in=message_ids).update(
flags=F("flags").bitor(UserMessage.flags.is_private)
)
if count == 0 and range_end >= Message.objects.last().id:
break
i = range_end
processed += len(message_ids)
if total != 0:
percent = round((processed / total) * 100, 2)
else:
percent = 100.00
print(f"Processed {processed}/{total} {percent}%", flush=True)
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0181_userprofile_change_emojiset"),
]
operations = [
migrations.RunPython(
set_initial_value_of_is_private_flag,
reverse_code=migrations.RunPython.noop,
elidable=True,
),
] | en | 0.791983 | # Total is only used for the progress bar # Can't use [Recipient.PERSONAL, Recipient.HUDDLE] in migration files | 1.980587 | 2 |
cirq/optimizers/stratify.py | lilies/Cirq | 1 | 6630429 | <reponame>lilies/Cirq
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import TYPE_CHECKING, Type, Callable, Union, Iterable, Set
from cirq import ops, circuits
if TYPE_CHECKING:
import cirq
# A function that decides based on an operation
# whether it belongs to a class or not
Classifier = Callable[['cirq.Operation'], bool]
# Any of the possible operation categories that we can stratify on.
Category = Union['cirq.Gate', 'cirq.Operation', Type['cirq.Gate'],
Type['cirq.Operation'], Classifier]
def stratified_circuit(circuit: 'cirq.Circuit', *,
categories: Iterable[Category]) -> 'cirq.Circuit':
"""Repacks avoiding simultaneous operations with different classes.
Sometimes, certain operations should not be done at the same time. For
example, the physical hardware may not be capable of doing certain
operations at the same time. Or it may have worse noise characteristics
when certain operations are done at the same time. In these cases, it
would be good to rearrange the circuit so that these operations always
occur in different moments.
(As a secondary effect, this may make the circuit easier to read.)
    This method takes a series of classifiers identifying categories of
operations and then ensures operations from each category only overlap
with operations from the same category. There is no guarantee that the
resulting circuit will be optimally packed under this constraint.
Args:
circuit: The circuit whose operations should be re-arranged.
categories: A list of classifiers picking out certain operations.
There are several ways to specify a classifier. You can pass
in a gate instance (e.g. `cirq.X`), a gate type (e.g.
`cirq.XPowGate`), an operation instance (e.g.
`cirq.X(cirq.LineQubit(0))`), an operation type (e.g.
`cirq.GlobalPhaseOperation`), or an arbitrary operation
predicate (e.g. `lambda op: len(op.qubits) == 2`).
Returns:
A copy of the original circuit, but with re-arranged operations.
"""
# Normalize categories into classifier functions.
classifiers = [_category_to_classifier(category) for category in categories]
# Make the classifiers exhaustive by adding an "everything else" bucket.
and_the_rest = lambda op: all(
not classifier(op) for classifier in classifiers)
classifiers_and_the_rest = [*classifiers, and_the_rest]
# Try the algorithm with each permutation of the classifiers.
classifiers_permutations = list(
itertools.permutations(classifiers_and_the_rest))
reversed_circuit = circuit[::-1]
solutions = []
for c in classifiers_permutations:
solutions.append(stratify_circuit(list(c), circuit))
# Do the same thing, except this time in reverse. This helps for some
# circuits because it inserts operations at the end instead of at the
# beginning.
solutions.append(stratify_circuit(list(c), reversed_circuit)[::-1])
# Return the shortest circuit.
return min(solutions, key=lambda c: len(c))
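# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a rough example
# of calling `stratified_circuit` so that single-qubit and two-qubit gates
# never share a moment.  The circuit below is invented for illustration and
# assumes the usual public constructors (`cirq.LineQubit`, `cirq.X`, `cirq.CZ`).
def _stratified_circuit_example() -> 'cirq.Circuit':
    import cirq  # local import: the module-level import is TYPE_CHECKING only
    a, b, c = cirq.LineQubit.range(3)
    circuit = cirq.Circuit([
        cirq.X(a), cirq.CZ(b, c),   # would otherwise be packed into one moment
        cirq.CZ(a, b), cirq.X(c),
    ])
    return stratified_circuit(
        circuit,
        categories=[
            lambda op: len(op.qubits) == 1,   # single-qubit bucket
            lambda op: len(op.qubits) == 2,   # two-qubit bucket
        ])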
def stratify_circuit(classifiers: Iterable[Classifier],
circuit: circuits.Circuit):
"""Performs the stratification by iterating through the operations in the
circuit and using the given classifiers to align them.
Args:
classifiers: A list of rules to align the circuit. Must be exhaustive,
i.e. all operations will be caught by one of the processors.
circuit: The circuit to break out into homogeneous moments. Will not be
edited.
Returns:
The stratified circuit.
"""
solution = circuits.Circuit()
circuit_copy = circuit.copy()
while len(circuit_copy.all_qubits()) > 0:
for classifier in classifiers:
current_moment = ops.Moment()
blocked_qubits: Set[ops.Qid] = set()
for moment_idx, moment in enumerate(circuit_copy.moments):
for op in moment.operations:
can_insert = classifier(op)
if not can_insert:
blocked_qubits.update(op.qubits)
else:
# Ensure that all the qubits for this operation are
# still available.
if not any(
qubit in blocked_qubits for qubit in op.qubits):
# Add the operation to the current moment and
# remove it from the circuit.
current_moment = current_moment.with_operation(op)
blocked_qubits.update(op.qubits)
circuit_copy.batch_remove([(moment_idx, op)])
# Short-circuit: If all the qubits are blocked, go on to the
# next moment.
if blocked_qubits.issuperset(circuit_copy.all_qubits()):
break
if len(current_moment) > 0:
solution.append(current_moment)
return solution
# No type for `category` because MyPy does not keep the return type when
# returning a callback.
def _category_to_classifier(category) -> Classifier:
"""Normalizes the given category into a classifier function."""
if isinstance(category, ops.Gate):
return lambda op: op.gate == category
if isinstance(category, ops.Operation):
return lambda op: op == category
elif isinstance(category, type) and issubclass(category, ops.Gate):
return lambda op: isinstance(op.gate, category)
elif isinstance(category, type) and issubclass(category, ops.Operation):
return lambda op: isinstance(op, category)
elif callable(category):
return lambda op: category(op)
else:
raise TypeError(f'Unrecognized classifier type '
f'{type(category)} ({category!r}).\n'
f'Expected a cirq.Gate, cirq.Operation, '
f'Type[cirq.Gate], Type[cirq.Operation], '
f'or Callable[[cirq.Operation], bool].')
| # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import TYPE_CHECKING, Type, Callable, Union, Iterable, Set
from cirq import ops, circuits
if TYPE_CHECKING:
import cirq
# A function that decides based on an operation
# whether it belongs to a class or not
Classifier = Callable[['cirq.Operation'], bool]
# Any of the possible operation categories that we can stratify on.
Category = Union['cirq.Gate', 'cirq.Operation', Type['cirq.Gate'],
Type['cirq.Operation'], Classifier]
def stratified_circuit(circuit: 'cirq.Circuit', *,
categories: Iterable[Category]) -> 'cirq.Circuit':
"""Repacks avoiding simultaneous operations with different classes.
Sometimes, certain operations should not be done at the same time. For
example, the physical hardware may not be capable of doing certain
operations at the same time. Or it may have worse noise characteristics
when certain operations are done at the same time. In these cases, it
would be good to rearrange the circuit so that these operations always
occur in different moments.
(As a secondary effect, this may make the circuit easier to read.)
    This method takes a series of classifiers identifying categories of
operations and then ensures operations from each category only overlap
with operations from the same category. There is no guarantee that the
resulting circuit will be optimally packed under this constraint.
Args:
circuit: The circuit whose operations should be re-arranged.
categories: A list of classifiers picking out certain operations.
There are several ways to specify a classifier. You can pass
in a gate instance (e.g. `cirq.X`), a gate type (e.g.
`cirq.XPowGate`), an operation instance (e.g.
`cirq.X(cirq.LineQubit(0))`), an operation type (e.g.
`cirq.GlobalPhaseOperation`), or an arbitrary operation
predicate (e.g. `lambda op: len(op.qubits) == 2`).
Returns:
A copy of the original circuit, but with re-arranged operations.
"""
# Normalize categories into classifier functions.
classifiers = [_category_to_classifier(category) for category in categories]
# Make the classifiers exhaustive by adding an "everything else" bucket.
and_the_rest = lambda op: all(
not classifier(op) for classifier in classifiers)
classifiers_and_the_rest = [*classifiers, and_the_rest]
# Try the algorithm with each permutation of the classifiers.
classifiers_permutations = list(
itertools.permutations(classifiers_and_the_rest))
reversed_circuit = circuit[::-1]
solutions = []
for c in classifiers_permutations:
solutions.append(stratify_circuit(list(c), circuit))
# Do the same thing, except this time in reverse. This helps for some
# circuits because it inserts operations at the end instead of at the
# beginning.
solutions.append(stratify_circuit(list(c), reversed_circuit)[::-1])
# Return the shortest circuit.
return min(solutions, key=lambda c: len(c))
def stratify_circuit(classifiers: Iterable[Classifier],
circuit: circuits.Circuit):
"""Performs the stratification by iterating through the operations in the
circuit and using the given classifiers to align them.
Args:
classifiers: A list of rules to align the circuit. Must be exhaustive,
i.e. all operations will be caught by one of the processors.
circuit: The circuit to break out into homogeneous moments. Will not be
edited.
Returns:
The stratified circuit.
"""
solution = circuits.Circuit()
circuit_copy = circuit.copy()
while len(circuit_copy.all_qubits()) > 0:
for classifier in classifiers:
current_moment = ops.Moment()
blocked_qubits: Set[ops.Qid] = set()
for moment_idx, moment in enumerate(circuit_copy.moments):
for op in moment.operations:
can_insert = classifier(op)
if not can_insert:
blocked_qubits.update(op.qubits)
else:
# Ensure that all the qubits for this operation are
# still available.
if not any(
qubit in blocked_qubits for qubit in op.qubits):
# Add the operation to the current moment and
# remove it from the circuit.
current_moment = current_moment.with_operation(op)
blocked_qubits.update(op.qubits)
circuit_copy.batch_remove([(moment_idx, op)])
# Short-circuit: If all the qubits are blocked, go on to the
# next moment.
if blocked_qubits.issuperset(circuit_copy.all_qubits()):
break
if len(current_moment) > 0:
solution.append(current_moment)
return solution
# No type for `category` because MyPy does not keep the return type when
# returning a callback.
def _category_to_classifier(category) -> Classifier:
"""Normalizes the given category into a classifier function."""
if isinstance(category, ops.Gate):
return lambda op: op.gate == category
if isinstance(category, ops.Operation):
return lambda op: op == category
elif isinstance(category, type) and issubclass(category, ops.Gate):
return lambda op: isinstance(op.gate, category)
elif isinstance(category, type) and issubclass(category, ops.Operation):
return lambda op: isinstance(op, category)
elif callable(category):
return lambda op: category(op)
else:
raise TypeError(f'Unrecognized classifier type '
f'{type(category)} ({category!r}).\n'
f'Expected a cirq.Gate, cirq.Operation, '
f'Type[cirq.Gate], Type[cirq.Operation], '
f'or Callable[[cirq.Operation], bool].') | en | 0.869646 | # Copyright 2020 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A function that decides based on an operation # whether it belongs to a class or not # Any of the possible operation categories that we can stratify on. Repacks avoiding simultaneous operations with different classes. Sometimes, certain operations should not be done at the same time. For example, the physical hardware may not be capable of doing certain operations at the same time. Or it may have worse noise characteristics when certain operations are done at the same time. In these cases, it would be good to rearrange the circuit so that these operations always occur in different moments. (As a secondary effect, this may make the circuit easier to read.) This methods takes a series of classifiers identifying categories of operations and then ensures operations from each category only overlap with operations from the same category. There is no guarantee that the resulting circuit will be optimally packed under this constraint. Args: circuit: The circuit whose operations should be re-arranged. categories: A list of classifiers picking out certain operations. There are several ways to specify a classifier. You can pass in a gate instance (e.g. `cirq.X`), a gate type (e.g. `cirq.XPowGate`), an operation instance (e.g. `cirq.X(cirq.LineQubit(0))`), an operation type (e.g. `cirq.GlobalPhaseOperation`), or an arbitrary operation predicate (e.g. `lambda op: len(op.qubits) == 2`). Returns: A copy of the original circuit, but with re-arranged operations. # Normalize categories into classifier functions. # Make the classifiers exhaustive by adding an "everything else" bucket. # Try the algorithm with each permutation of the classifiers. # Do the same thing, except this time in reverse. This helps for some # circuits because it inserts operations at the end instead of at the # beginning. # Return the shortest circuit. Performs the stratification by iterating through the operations in the circuit and using the given classifiers to align them. Args: classifiers: A list of rules to align the circuit. Must be exhaustive, i.e. all operations will be caught by one of the processors. circuit: The circuit to break out into homogeneous moments. Will not be edited. Returns: The stratified circuit. # Ensure that all the qubits for this operation are # still available. # Add the operation to the current moment and # remove it from the circuit. # Short-circuit: If all the qubits are blocked, go on to the # next moment. # No type for `category` because MyPy does not keep the return type when # returning a callback. Normalizes the given category into a classifier function. | 2.987052 | 3 |
python_modules/dagster/dagster_tests/execution_tests/test_event_metadata.py | souterjk/dagster | 0 | 6630430 | <reponame>souterjk/dagster
import pytest
from dagster import (
AssetMaterialization,
AssetObservation,
DagsterEventType,
EventMetadata,
FloatMetadataEntryData,
IntMetadataEntryData,
PythonArtifactMetadataEntryData,
TextMetadataEntryData,
UrlMetadataEntryData,
execute_pipeline,
pipeline,
solid,
)
from dagster.check import CheckError
from dagster.core.definitions.event_metadata import (
DagsterInvalidEventMetadata,
EventMetadataEntry,
parse_metadata,
)
from dagster.core.definitions.event_metadata.table import (
TableColumn,
TableColumnConstraints,
TableConstraints,
TableRecord,
TableSchema,
)
from dagster.utils import frozendict
def solid_events_for_type(result, solid_name, event_type):
solid_result = result.result_for_solid(solid_name)
return [
compute_step_event
for compute_step_event in solid_result.compute_step_events
if compute_step_event.event_type == event_type
]
def test_event_metadata_asset_materialization():
@solid(output_defs=[])
def the_solid(_context):
yield AssetMaterialization(
asset_key="foo",
metadata={
"text": "FOO",
"int": 22,
"url": EventMetadata.url("http://fake.com"),
"float": 0.1,
"python": EventMetadata.python_artifact(EventMetadata),
},
)
@pipeline
def the_pipeline():
the_solid()
result = execute_pipeline(the_pipeline)
assert result
assert result.success
materialization_events = solid_events_for_type(
result, "the_solid", DagsterEventType.ASSET_MATERIALIZATION
)
assert len(materialization_events) == 1
materialization = materialization_events[0].event_specific_data.materialization
assert len(materialization.metadata_entries) == 5
entry_map = {
entry.label: entry.entry_data.__class__ for entry in materialization.metadata_entries
}
assert entry_map["text"] == TextMetadataEntryData
assert entry_map["int"] == IntMetadataEntryData
assert entry_map["url"] == UrlMetadataEntryData
assert entry_map["float"] == FloatMetadataEntryData
assert entry_map["python"] == PythonArtifactMetadataEntryData
def test_event_metadata_asset_observation():
@solid(output_defs=[])
def the_solid(_context):
yield AssetObservation(
asset_key="foo",
metadata={
"text": "FOO",
"int": 22,
"url": EventMetadata.url("http://fake.com"),
"float": 0.1,
"python": EventMetadata.python_artifact(EventMetadata),
},
)
@pipeline
def the_pipeline():
the_solid()
result = execute_pipeline(the_pipeline)
assert result
assert result.success
observation_events = solid_events_for_type(
result, "the_solid", DagsterEventType.ASSET_OBSERVATION
)
assert len(observation_events) == 1
observation = observation_events[0].event_specific_data.asset_observation
assert len(observation.metadata_entries) == 5
entry_map = {entry.label: entry.entry_data.__class__ for entry in observation.metadata_entries}
assert entry_map["text"] == TextMetadataEntryData
assert entry_map["int"] == IntMetadataEntryData
assert entry_map["url"] == UrlMetadataEntryData
assert entry_map["float"] == FloatMetadataEntryData
assert entry_map["python"] == PythonArtifactMetadataEntryData
def test_unknown_metadata_value():
@solid(output_defs=[])
def the_solid(context):
yield AssetMaterialization(
asset_key="foo",
metadata={"bad": context.instance},
)
@pipeline
def the_pipeline():
the_solid()
with pytest.raises(DagsterInvalidEventMetadata) as exc_info:
execute_pipeline(the_pipeline)
assert str(exc_info.value) == (
'Could not resolve the metadata value for "bad" to a known type. '
"Its type was <class 'dagster.core.instance.DagsterInstance'>. "
"Consider wrapping the value with the appropriate EventMetadata type."
)
def test_parse_invalid_metadata():
metadata = {"foo": object()}
with pytest.raises(DagsterInvalidEventMetadata) as exc_info:
parse_metadata(metadata, [])
entries = parse_metadata(metadata, [], allow_invalid=True)
assert len(entries) == 1
assert entries[0].label == "foo"
assert entries[0].entry_data == TextMetadataEntryData("[object] (unserializable)")
def test_bad_json_metadata_value():
@solid(output_defs=[])
def the_solid(context):
yield AssetMaterialization(
asset_key="foo",
metadata={"bad": {"nested": context.instance}},
)
@pipeline
def the_pipeline():
the_solid()
with pytest.raises(DagsterInvalidEventMetadata) as exc_info:
execute_pipeline(the_pipeline)
assert str(exc_info.value) == (
'Could not resolve the metadata value for "bad" to a JSON serializable value. '
"Consider wrapping the value with the appropriate EventMetadata type."
)
def test_table_metadata_value_schema_inference():
table_metadata_value = EventMetadataEntry.table(
records=[
TableRecord(name="foo", status=False),
TableRecord(name="bar", status=True),
],
label="foo",
)
schema = table_metadata_value.entry_data.schema
assert isinstance(schema, TableSchema)
assert schema.columns == [
TableColumn(name="name", type="string"),
TableColumn(name="status", type="bool"),
]
bad_values = frozendict(
{
"table_schema": {"columns": False, "constraints": False},
"table_column": {"name": False, "type": False, "description": False, "constraints": False},
"table_constraints": {"other": False},
"table_column_constraints": {
"nullable": "foo",
"unique": "foo",
"other": False,
},
}
)
def test_table_column_keys():
with pytest.raises(TypeError):
TableColumn(bad_key="foo", description="bar", type="string") # type: ignore
@pytest.mark.parametrize("key,value", list(bad_values["table_column"].items()))
def test_table_column_values(key, value):
kwargs = {
"name": "foo",
"type": "string",
"description": "bar",
"constraints": TableColumnConstraints(other=["foo"]),
}
kwargs[key] = value
with pytest.raises(CheckError):
TableColumn(**kwargs)
def test_table_constraints_keys():
with pytest.raises(TypeError):
TableColumn(bad_key="foo") # type: ignore
@pytest.mark.parametrize("key,value", list(bad_values["table_constraints"].items()))
def test_table_constraints(key, value):
kwargs = {"other": ["foo"]}
kwargs[key] = value
with pytest.raises(CheckError):
TableConstraints(**kwargs)
def test_table_column_constraints_keys():
with pytest.raises(TypeError):
TableColumnConstraints(bad_key="foo") # type: ignore
# minimum and maximum aren't checked because they depend on the type of the column
@pytest.mark.parametrize("key,value", list(bad_values["table_column_constraints"].items()))
def test_table_column_constraints_values(key, value):
kwargs = {
"nullable": True,
"unique": True,
"other": ["foo"],
}
kwargs[key] = value
with pytest.raises(CheckError):
TableColumnConstraints(**kwargs)
def test_table_schema_keys():
with pytest.raises(TypeError):
TableSchema(bad_key="foo") # type: ignore
@pytest.mark.parametrize("key,value", list(bad_values["table_schema"].items()))
def test_table_schema_values(key, value):
kwargs = {
"constraints": TableConstraints(other=["foo"]),
"columns": [
TableColumn(
name="foo",
type="string",
description="bar",
constraints=TableColumnConstraints(other=["foo"]),
)
],
}
kwargs[key] = value
with pytest.raises(CheckError):
TableSchema(**kwargs)
def test_complex_table_schema():
assert isinstance(
TableSchema(
columns=[
TableColumn(
name="foo",
type="customtype",
constraints=TableColumnConstraints(
nullable=True,
unique=True,
),
),
TableColumn(
name="bar",
type="string",
description="bar",
constraints=TableColumnConstraints(
nullable=False,
other=["foo"],
),
),
],
constraints=TableConstraints(other=["foo"]),
),
TableSchema,
)
| import pytest
from dagster import (
AssetMaterialization,
AssetObservation,
DagsterEventType,
EventMetadata,
FloatMetadataEntryData,
IntMetadataEntryData,
PythonArtifactMetadataEntryData,
TextMetadataEntryData,
UrlMetadataEntryData,
execute_pipeline,
pipeline,
solid,
)
from dagster.check import CheckError
from dagster.core.definitions.event_metadata import (
DagsterInvalidEventMetadata,
EventMetadataEntry,
parse_metadata,
)
from dagster.core.definitions.event_metadata.table import (
TableColumn,
TableColumnConstraints,
TableConstraints,
TableRecord,
TableSchema,
)
from dagster.utils import frozendict
def solid_events_for_type(result, solid_name, event_type):
solid_result = result.result_for_solid(solid_name)
return [
compute_step_event
for compute_step_event in solid_result.compute_step_events
if compute_step_event.event_type == event_type
]
def test_event_metadata_asset_materialization():
@solid(output_defs=[])
def the_solid(_context):
yield AssetMaterialization(
asset_key="foo",
metadata={
"text": "FOO",
"int": 22,
"url": EventMetadata.url("http://fake.com"),
"float": 0.1,
"python": EventMetadata.python_artifact(EventMetadata),
},
)
@pipeline
def the_pipeline():
the_solid()
result = execute_pipeline(the_pipeline)
assert result
assert result.success
materialization_events = solid_events_for_type(
result, "the_solid", DagsterEventType.ASSET_MATERIALIZATION
)
assert len(materialization_events) == 1
materialization = materialization_events[0].event_specific_data.materialization
assert len(materialization.metadata_entries) == 5
entry_map = {
entry.label: entry.entry_data.__class__ for entry in materialization.metadata_entries
}
assert entry_map["text"] == TextMetadataEntryData
assert entry_map["int"] == IntMetadataEntryData
assert entry_map["url"] == UrlMetadataEntryData
assert entry_map["float"] == FloatMetadataEntryData
assert entry_map["python"] == PythonArtifactMetadataEntryData
def test_event_metadata_asset_observation():
@solid(output_defs=[])
def the_solid(_context):
yield AssetObservation(
asset_key="foo",
metadata={
"text": "FOO",
"int": 22,
"url": EventMetadata.url("http://fake.com"),
"float": 0.1,
"python": EventMetadata.python_artifact(EventMetadata),
},
)
@pipeline
def the_pipeline():
the_solid()
result = execute_pipeline(the_pipeline)
assert result
assert result.success
observation_events = solid_events_for_type(
result, "the_solid", DagsterEventType.ASSET_OBSERVATION
)
assert len(observation_events) == 1
observation = observation_events[0].event_specific_data.asset_observation
assert len(observation.metadata_entries) == 5
entry_map = {entry.label: entry.entry_data.__class__ for entry in observation.metadata_entries}
assert entry_map["text"] == TextMetadataEntryData
assert entry_map["int"] == IntMetadataEntryData
assert entry_map["url"] == UrlMetadataEntryData
assert entry_map["float"] == FloatMetadataEntryData
assert entry_map["python"] == PythonArtifactMetadataEntryData
def test_unknown_metadata_value():
@solid(output_defs=[])
def the_solid(context):
yield AssetMaterialization(
asset_key="foo",
metadata={"bad": context.instance},
)
@pipeline
def the_pipeline():
the_solid()
with pytest.raises(DagsterInvalidEventMetadata) as exc_info:
execute_pipeline(the_pipeline)
assert str(exc_info.value) == (
'Could not resolve the metadata value for "bad" to a known type. '
"Its type was <class 'dagster.core.instance.DagsterInstance'>. "
"Consider wrapping the value with the appropriate EventMetadata type."
)
def test_parse_invalid_metadata():
metadata = {"foo": object()}
with pytest.raises(DagsterInvalidEventMetadata) as exc_info:
parse_metadata(metadata, [])
entries = parse_metadata(metadata, [], allow_invalid=True)
assert len(entries) == 1
assert entries[0].label == "foo"
assert entries[0].entry_data == TextMetadataEntryData("[object] (unserializable)")
def test_bad_json_metadata_value():
@solid(output_defs=[])
def the_solid(context):
yield AssetMaterialization(
asset_key="foo",
metadata={"bad": {"nested": context.instance}},
)
@pipeline
def the_pipeline():
the_solid()
with pytest.raises(DagsterInvalidEventMetadata) as exc_info:
execute_pipeline(the_pipeline)
assert str(exc_info.value) == (
'Could not resolve the metadata value for "bad" to a JSON serializable value. '
"Consider wrapping the value with the appropriate EventMetadata type."
)
def test_table_metadata_value_schema_inference():
table_metadata_value = EventMetadataEntry.table(
records=[
TableRecord(name="foo", status=False),
TableRecord(name="bar", status=True),
],
label="foo",
)
schema = table_metadata_value.entry_data.schema
assert isinstance(schema, TableSchema)
assert schema.columns == [
TableColumn(name="name", type="string"),
TableColumn(name="status", type="bool"),
]
bad_values = frozendict(
{
"table_schema": {"columns": False, "constraints": False},
"table_column": {"name": False, "type": False, "description": False, "constraints": False},
"table_constraints": {"other": False},
"table_column_constraints": {
"nullable": "foo",
"unique": "foo",
"other": False,
},
}
)
def test_table_column_keys():
with pytest.raises(TypeError):
TableColumn(bad_key="foo", description="bar", type="string") # type: ignore
@pytest.mark.parametrize("key,value", list(bad_values["table_column"].items()))
def test_table_column_values(key, value):
kwargs = {
"name": "foo",
"type": "string",
"description": "bar",
"constraints": TableColumnConstraints(other=["foo"]),
}
kwargs[key] = value
with pytest.raises(CheckError):
TableColumn(**kwargs)
def test_table_constraints_keys():
with pytest.raises(TypeError):
TableColumn(bad_key="foo") # type: ignore
@pytest.mark.parametrize("key,value", list(bad_values["table_constraints"].items()))
def test_table_constraints(key, value):
kwargs = {"other": ["foo"]}
kwargs[key] = value
with pytest.raises(CheckError):
TableConstraints(**kwargs)
def test_table_column_constraints_keys():
with pytest.raises(TypeError):
TableColumnConstraints(bad_key="foo") # type: ignore
# minimum and maximum aren't checked because they depend on the type of the column
@pytest.mark.parametrize("key,value", list(bad_values["table_column_constraints"].items()))
def test_table_column_constraints_values(key, value):
kwargs = {
"nullable": True,
"unique": True,
"other": ["foo"],
}
kwargs[key] = value
with pytest.raises(CheckError):
TableColumnConstraints(**kwargs)
def test_table_schema_keys():
with pytest.raises(TypeError):
TableSchema(bad_key="foo") # type: ignore
@pytest.mark.parametrize("key,value", list(bad_values["table_schema"].items()))
def test_table_schema_values(key, value):
kwargs = {
"constraints": TableConstraints(other=["foo"]),
"columns": [
TableColumn(
name="foo",
type="string",
description="bar",
constraints=TableColumnConstraints(other=["foo"]),
)
],
}
kwargs[key] = value
with pytest.raises(CheckError):
TableSchema(**kwargs)
def test_complex_table_schema():
assert isinstance(
TableSchema(
columns=[
TableColumn(
name="foo",
type="customtype",
constraints=TableColumnConstraints(
nullable=True,
unique=True,
),
),
TableColumn(
name="bar",
type="string",
description="bar",
constraints=TableColumnConstraints(
nullable=False,
other=["foo"],
),
),
],
constraints=TableConstraints(other=["foo"]),
),
TableSchema,
) | en | 0.715944 | # type: ignore # type: ignore # type: ignore # minimum and maximum aren't checked because they depend on the type of the column # type: ignore | 2.119915 | 2 |
qiskit/providers/ibmq/runtime/runtime_program.py | eggerdj/qiskit-ibmq-provider | 0 | 6630431 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Qiskit runtime program."""
import logging
from typing import Optional, List, NamedTuple, Dict
logger = logging.getLogger(__name__)
class RuntimeProgram:
"""Class representing program metadata.
This class contains the metadata describing a program, such as its
name, ID, description, etc.
You can use the :class:`~qiskit.providers.ibmq.runtime.IBMRuntimeService`
to retrieve the metadata of a specific program or all programs. For example::
from qiskit import IBMQ
provider = IBMQ.load_account()
# To retrieve metadata of all programs.
programs = provider.runtime.programs()
# To retrieve metadata of a single program.
program = provider.runtime.program(program_id='circuit-runner')
print(f"Program {program.name} takes parameters {program.parameters}")
"""
def __init__(
self,
program_name: str,
program_id: str,
description: str,
parameters: Optional[List] = None,
return_values: Optional[List] = None,
interim_results: Optional[List] = None,
max_execution_time: int = 0,
version: str = "0",
backend_requirements: Optional[Dict] = None,
creation_date: str = ""
) -> None:
"""RuntimeProgram constructor.
Args:
program_name: Program name.
program_id: Program ID.
description: Program description.
parameters: Documentation on program parameters.
return_values: Documentation on program return values.
interim_results: Documentation on program interim results.
max_execution_time: Maximum execution time.
version: Program version.
backend_requirements: Backend requirements.
creation_date: Program creation date.
"""
self._name = program_name
self._id = program_id
self._description = description
self._max_execution_time = max_execution_time
self._version = version
self._backend_requirements = backend_requirements or {}
self._parameters = []
self._return_values = []
self._interim_results = []
self._creation_date = creation_date
if parameters:
for param in parameters:
self._parameters.append(
ProgramParameter(name=param['name'],
description=param['description'],
type=param['type'],
required=param['required']))
if return_values is not None:
for ret in return_values:
self._return_values.append(ProgramResult(name=ret['name'],
description=ret['description'],
type=ret['type']))
if interim_results is not None:
for intret in interim_results:
self._interim_results.append(ProgramResult(name=intret['name'],
description=intret['description'],
type=intret['type']))
def __str__(self) -> str:
def _format_common(items: List) -> None:
"""Add name, description, and type to `formatted`."""
for item in items:
formatted.append(" "*4 + "- " + item.name + ":")
formatted.append(" "*6 + "Description: " + item.description)
formatted.append(" "*6 + "Type: " + item.type)
if hasattr(item, 'required'):
formatted.append(" "*6 + "Required: " + str(item.required))
formatted = [f'{self.program_id}:',
f" Name: {self.name}",
f" Description: {self.description}",
f" Version: {self.version}",
f" Creation date: {self.creation_date}",
f" Max execution time: {self.max_execution_time}",
f" Input parameters:"]
if self._parameters:
_format_common(self._parameters)
else:
formatted.append(" "*4 + "none")
formatted.append(" Interim results:")
if self._interim_results:
_format_common(self._interim_results)
else:
formatted.append(" "*4 + "none")
formatted.append(" Returns:")
if self._return_values:
_format_common(self._return_values)
else:
formatted.append(" "*4 + "none")
return '\n'.join(formatted)
def to_dict(self) -> Dict:
"""Convert program metadata to dictionary format.
Returns:
Program metadata in dictionary format.
"""
return {
"program_id": self.program_id,
"name": self.name,
"description": self.description,
"max_execution_time": self.max_execution_time,
"version": self.version,
"backend_requirements": self.backend_requirements,
"parameters": self.parameters,
"return_values": self.return_values,
"interim_results": self.interim_results
}
@property
def program_id(self) -> str:
"""Program ID.
Returns:
Program ID.
"""
return self._id
@property
def name(self) -> str:
"""Program name.
Returns:
Program name.
"""
return self._name
@property
def description(self) -> str:
"""Program description.
Returns:
Program description.
"""
return self._description
@property
def parameters(self) -> List['ProgramParameter']:
"""Program parameter definitions.
Returns:
Parameter definitions for this program.
"""
return self._parameters
@property
def return_values(self) -> List['ProgramResult']:
"""Program return value definitions.
Returns:
Return value definitions for this program.
"""
return self._return_values
@property
def interim_results(self) -> List['ProgramResult']:
"""Program interim result definitions.
Returns:
Interim result definitions for this program.
"""
return self._interim_results
@property
def max_execution_time(self) -> int:
"""Maximum execution time in seconds.
A program execution exceeding this time will be forcibly terminated.
Returns:
Maximum execution time.
"""
return self._max_execution_time
@property
def version(self) -> str:
"""Program version.
Returns:
Program version.
"""
return self._version
@property
def backend_requirements(self) -> Dict:
"""Backend requirements.
Returns:
Backend requirements for this program.
"""
return self._backend_requirements
@property
def creation_date(self) -> str:
"""Program creation date.
Returns:
Program creation date.
"""
return self._creation_date
class ProgramParameter(NamedTuple):
"""Program parameter."""
name: str
description: str
type: str
required: bool
class ProgramResult(NamedTuple):
"""Program result."""
name: str
description: str
type: str
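# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): constructing a
# ``RuntimeProgram`` directly from hand-written metadata and printing it.  In
# normal use the provider builds these objects from the runtime API, as shown
# in the class docstring; the program name, parameters and return values below
# are invented for this example only.
if __name__ == '__main__':
    _example = RuntimeProgram(
        program_name="circuit-runner",
        program_id="circuit-runner",
        description="Runs a circuit and returns the result.",
        parameters=[
            {"name": "circuits", "description": "Circuits to run.",
             "type": "list", "required": True},
        ],
        return_values=[
            {"name": "result", "description": "Execution result.",
             "type": "dict"},
        ],
        max_execution_time=3600,
        version="1.0",
    )
    print(_example)             # formatted summary via __str__ above
    print(_example.to_dict())   # plain-dict form of the same metadata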
| # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Qiskit runtime program."""
import logging
from typing import Optional, List, NamedTuple, Dict
logger = logging.getLogger(__name__)
class RuntimeProgram:
"""Class representing program metadata.
This class contains the metadata describing a program, such as its
name, ID, description, etc.
You can use the :class:`~qiskit.providers.ibmq.runtime.IBMRuntimeService`
to retrieve the metadata of a specific program or all programs. For example::
from qiskit import IBMQ
provider = IBMQ.load_account()
# To retrieve metadata of all programs.
programs = provider.runtime.programs()
# To retrieve metadata of a single program.
program = provider.runtime.program(program_id='circuit-runner')
print(f"Program {program.name} takes parameters {program.parameters}")
"""
def __init__(
self,
program_name: str,
program_id: str,
description: str,
parameters: Optional[List] = None,
return_values: Optional[List] = None,
interim_results: Optional[List] = None,
max_execution_time: int = 0,
version: str = "0",
backend_requirements: Optional[Dict] = None,
creation_date: str = ""
) -> None:
"""RuntimeProgram constructor.
Args:
program_name: Program name.
program_id: Program ID.
description: Program description.
parameters: Documentation on program parameters.
return_values: Documentation on program return values.
interim_results: Documentation on program interim results.
max_execution_time: Maximum execution time.
version: Program version.
backend_requirements: Backend requirements.
creation_date: Program creation date.
"""
self._name = program_name
self._id = program_id
self._description = description
self._max_execution_time = max_execution_time
self._version = version
self._backend_requirements = backend_requirements or {}
self._parameters = []
self._return_values = []
self._interim_results = []
self._creation_date = creation_date
if parameters:
for param in parameters:
self._parameters.append(
ProgramParameter(name=param['name'],
description=param['description'],
type=param['type'],
required=param['required']))
if return_values is not None:
for ret in return_values:
self._return_values.append(ProgramResult(name=ret['name'],
description=ret['description'],
type=ret['type']))
if interim_results is not None:
for intret in interim_results:
self._interim_results.append(ProgramResult(name=intret['name'],
description=intret['description'],
type=intret['type']))
def __str__(self) -> str:
def _format_common(items: List) -> None:
"""Add name, description, and type to `formatted`."""
for item in items:
formatted.append(" "*4 + "- " + item.name + ":")
formatted.append(" "*6 + "Description: " + item.description)
formatted.append(" "*6 + "Type: " + item.type)
if hasattr(item, 'required'):
formatted.append(" "*6 + "Required: " + str(item.required))
formatted = [f'{self.program_id}:',
f" Name: {self.name}",
f" Description: {self.description}",
f" Version: {self.version}",
f" Creation date: {self.creation_date}",
f" Max execution time: {self.max_execution_time}",
f" Input parameters:"]
if self._parameters:
_format_common(self._parameters)
else:
formatted.append(" "*4 + "none")
formatted.append(" Interim results:")
if self._interim_results:
_format_common(self._interim_results)
else:
formatted.append(" "*4 + "none")
formatted.append(" Returns:")
if self._return_values:
_format_common(self._return_values)
else:
formatted.append(" "*4 + "none")
return '\n'.join(formatted)
def to_dict(self) -> Dict:
"""Convert program metadata to dictionary format.
Returns:
Program metadata in dictionary format.
"""
return {
"program_id": self.program_id,
"name": self.name,
"description": self.description,
"max_execution_time": self.max_execution_time,
"version": self.version,
"backend_requirements": self.backend_requirements,
"parameters": self.parameters,
"return_values": self.return_values,
"interim_results": self.interim_results
}
@property
def program_id(self) -> str:
"""Program ID.
Returns:
Program ID.
"""
return self._id
@property
def name(self) -> str:
"""Program name.
Returns:
Program name.
"""
return self._name
@property
def description(self) -> str:
"""Program description.
Returns:
Program description.
"""
return self._description
@property
def parameters(self) -> List['ProgramParameter']:
"""Program parameter definitions.
Returns:
Parameter definitions for this program.
"""
return self._parameters
@property
def return_values(self) -> List['ProgramResult']:
"""Program return value definitions.
Returns:
Return value definitions for this program.
"""
return self._return_values
@property
def interim_results(self) -> List['ProgramResult']:
"""Program interim result definitions.
Returns:
Interim result definitions for this program.
"""
return self._interim_results
@property
def max_execution_time(self) -> int:
"""Maximum execution time in seconds.
A program execution exceeding this time will be forcibly terminated.
Returns:
Maximum execution time.
"""
return self._max_execution_time
@property
def version(self) -> str:
"""Program version.
Returns:
Program version.
"""
return self._version
@property
def backend_requirements(self) -> Dict:
"""Backend requirements.
Returns:
Backend requirements for this program.
"""
return self._backend_requirements
@property
def creation_date(self) -> str:
"""Program creation date.
Returns:
Program creation date.
"""
return self._creation_date
class ProgramParameter(NamedTuple):
"""Program parameter."""
name: str
description: str
type: str
required: bool
class ProgramResult(NamedTuple):
"""Program result."""
name: str
description: str
type: str
| en | 0.667263 | # This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. Qiskit runtime program. Class representing program metadata. This class contains the metadata describing a program, such as its name, ID, description, etc. You can use the :class:`~qiskit.providers.ibmq.runtime.IBMRuntimeService` to retrieve the metadata of a specific program or all programs. For example:: from qiskit import IBMQ provider = IBMQ.load_account() # To retrieve metadata of all programs. programs = provider.runtime.programs() # To retrieve metadata of a single program. program = provider.runtime.program(program_id='circuit-runner') print(f"Program {program.name} takes parameters {program.parameters}") RuntimeProgram constructor. Args: program_name: Program name. program_id: Program ID. description: Program description. parameters: Documentation on program parameters. return_values: Documentation on program return values. interim_results: Documentation on program interim results. max_execution_time: Maximum execution time. version: Program version. backend_requirements: Backend requirements. creation_date: Program creation date. Add name, description, and type to `formatted`. Convert program metadata to dictionary format. Returns: Program metadata in dictionary format. Program ID. Returns: Program ID. Program name. Returns: Program name. Program description. Returns: Program description. Program parameter definitions. Returns: Parameter definitions for this program. Program return value definitions. Returns: Return value definitions for this program. Program interim result definitions. Returns: Interim result definitions for this program. Maximum execution time in seconds. A program execution exceeding this time will be forcibly terminated. Returns: Maximum execution time. Program version. Returns: Program version. Backend requirements. Returns: Backend requirements for this program. Program creation date. Returns: Program creation date. Program parameter. Program result. | 2.415876 | 2 |
test/selenium/src/lib/meta.py | sbilly/ggrc-core | 1 | 6630432 | <filename>test/selenium/src/lib/meta.py<gh_stars>1-10
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Metaclasses module"""
import pytest
from lib import exception
from lib import constants
from lib import environment
class DecorateFlakyTests(type):
# todo: this should be refactored to DecorateMethods and used with a
# factory
"""Decorates all test methods with a decorator that repeats a failed test a
couple of times
"""
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
if any(
[method_name in attr_name for method_name in [
constants.test_runner.TEST_METHOD_PREFIX,
constants.test_runner.TEST_METHOD_POSTFIX]
]) and callable(value):
dct[attr_name] = pytest.mark.flaky(
reruns=environment.RERUN_FAILED_TEST)(value)
return super(DecorateFlakyTests, mcs).__new__(mcs, name, bases, dct)
class RequireDocs(type):
"""Requires from all methods to include docstrings"""
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
      # functions always have __doc__ (None when missing), so check its value
      if callable(value) and not getattr(value, "__doc__", None):
raise exception.DocstringsMissing(attr_name)
return super(RequireDocs, mcs).__new__(mcs, name, bases, dct)
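# Illustrative sketch (not part of the original module): how ``RequireDocs``
# reacts when a class body contains an undocumented callable.  The
# three-argument metaclass call keeps the example identical under Python 2
# and Python 3; the class and method names are invented.
def _require_docs_example():
  """Return True if RequireDocs rejects an undocumented method."""
  def documented():
    """A method with a docstring."""
    return True
  def undocumented():
    return False
  RequireDocs("GoodPage", (object,), {"documented": documented})
  try:
    RequireDocs("BadPage", (object,), {"undocumented": undocumented})
  except exception.DocstringsMissing:
    return True
  return False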
| <filename>test/selenium/src/lib/meta.py<gh_stars>1-10
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Metaclasses module"""
import pytest
from lib import exception
from lib import constants
from lib import environment
class DecorateFlakyTests(type):
# todo: this should be refactored to DecorateMethods and used with a
# factory
"""Decorates all test methods with a decorator that repeats a failed test a
couple of times
"""
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
if any(
[method_name in attr_name for method_name in [
constants.test_runner.TEST_METHOD_PREFIX,
constants.test_runner.TEST_METHOD_POSTFIX]
]) and callable(value):
dct[attr_name] = pytest.mark.flaky(
reruns=environment.RERUN_FAILED_TEST)(value)
return super(DecorateFlakyTests, mcs).__new__(mcs, name, bases, dct)
class RequireDocs(type):
"""Requires from all methods to include docstrings"""
def __new__(mcs, name, bases, dct):
for attr_name, value in dct.items():
      # functions always have __doc__ (None when missing), so check its value
      if callable(value) and not getattr(value, "__doc__", None):
raise exception.DocstringsMissing(attr_name)
return super(RequireDocs, mcs).__new__(mcs, name, bases, dct)
| en | 0.811514 | # Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> Metaclasses module # todo: this should be refactored to DecorateMethods and used with a # factory Decorates all test methods with a decorator that repeats a failed test a couple of times Requires from all methods to include docstrings | 2.322035 | 2 |
13. File handling/Files.py | riyabhatia26/Python-Programming | 3 | 6630433 | import sys
f = open(sys.argv[1], mode='rt', encoding='utf-8')
for line in f:
sys.stdout.write(line)
f.close()
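# Illustrative alternative (not part of the original script): the same
# file-to-stdout copy using a context manager, which closes the file even if
# an exception is raised mid-loop.  Invocation is unchanged, e.g.
#     python Files.py some_input.txt
def cat_file(path):
    """Stream a UTF-8 text file to stdout, closing it automatically."""
    with open(path, mode='rt', encoding='utf-8') as handle:
        for line in handle:
            sys.stdout.write(line)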
| import sys
f = open(sys.argv[1], mode='rt', encoding='utf-8')
for line in f:
sys.stdout.write(line)
f.close()
| none | 1 | 2.363899 | 2 |
|
core/migrations/0001_initial.py | rdmaulana/covid19-dashboard-idn | 0 | 6630434 | <gh_stars>0
# Generated by Django 3.1.4 on 2020-12-23 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='covidRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=255)),
('total', models.CharField(max_length=255)),
('active', models.CharField(max_length=255)),
('recovered', models.CharField(max_length=255)),
('new_cases', models.CharField(blank=True, max_length=255, null=True)),
('new_deaths', models.CharField(blank=True, max_length=255, null=True)),
('total_deaths', models.CharField(blank=True, max_length=255, null=True)),
('total_tests', models.CharField(blank=True, max_length=255, null=True)),
('date', models.DateField()),
('latest_update', models.DateTimeField()),
],
options={
'verbose_name_plural': 'MASTER COVID DATA RECORD',
'db_table': 'covid_record',
},
),
]
| # Generated by Django 3.1.4 on 2020-12-23 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='covidRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=255)),
('total', models.CharField(max_length=255)),
('active', models.CharField(max_length=255)),
('recovered', models.CharField(max_length=255)),
('new_cases', models.CharField(blank=True, max_length=255, null=True)),
('new_deaths', models.CharField(blank=True, max_length=255, null=True)),
('total_deaths', models.CharField(blank=True, max_length=255, null=True)),
('total_tests', models.CharField(blank=True, max_length=255, null=True)),
('date', models.DateField()),
('latest_update', models.DateTimeField()),
],
options={
'verbose_name_plural': 'MASTER COVID DATA RECORD',
'db_table': 'covid_record',
},
),
] | en | 0.832646 | # Generated by Django 3.1.4 on 2020-12-23 07:55 | 1.910882 | 2 |
ana/hismask.py | seriksen/opticks | 1 | 6630435 | #!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
hismask.py: HisMask
========================
"""
import os, datetime, logging, sys
log = logging.getLogger(__name__)
import numpy as np
from opticks.ana.base import PhotonMaskFlags
from opticks.ana.seq import MaskType, SeqTable, SeqAna
from opticks.ana.nbase import count_unique_sorted
from opticks.ana.nload import A
class HisMask(MaskType):
"""
Formerly the abbrev came from $OPTICKS_DATA_DIR/resource/GFlags/abbrev.json
    now from the much more appropriate installcache/OKC
"""
def __init__(self):
log.debug("HisMask.__init__")
flags = PhotonMaskFlags()
MaskType.__init__(self, flags, flags.abbrev)
log.debug("HisMask.__init__ DONE")
def test_HisMask(af):
label = "TO BT SD"
mask = af.code(label)
label2 = af.label(mask)
log.info( " %30s -> %d -> %10s " % (label, mask, label2 ))
def test_HisMask_SeqTable(aa, af):
hflags = aa[:,3,3].view(np.uint32)
cu = count_unique_sorted(hflags)
st = SeqTable(cu, af)
    print(st)
def test_HisMask_SeqAna(aa, af):
hflags = aa[:,3,3].view(np.uint32)
sa = SeqAna(hflags, af)
    print(sa.table)
if __name__ == '__main__':
from opticks.ana.main import opticks_main
#ok = opticks_main(src="torch", tag="10", det="PmtInBox")
ok = opticks_main()
af = HisMask()
test_HisMask(af)
try:
ht = A.load_("ht",ok.src,ok.tag,ok.det, pfx=ok.pfx)
log.info("loaded ht %s %s shape %s " % (ht.path, ht.stamp, repr(ht.shape)))
#test_HisMask_SeqTable(ht, af)
test_HisMask_SeqAna(ht, af)
except IOError as err:
log.warning("no ht")
try:
ox = A.load_("ox",ok.src,ok.tag,ok.det, pfx=ok.pfx)
log.info("loaded ox %s %s shape %s " % (ox.path, ox.stamp, repr(ox.shape)))
#test_HisMask_SeqTable(ox, af)
test_HisMask_SeqAna(ox, af)
except IOError as err:
log.warning("no ht")
| #!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
hismask.py: HisMask
========================
"""
import os, datetime, logging, sys
log = logging.getLogger(__name__)
import numpy as np
from opticks.ana.base import PhotonMaskFlags
from opticks.ana.seq import MaskType, SeqTable, SeqAna
from opticks.ana.nbase import count_unique_sorted
from opticks.ana.nload import A
class HisMask(MaskType):
"""
Formerly the abbrev came from $OPTICKS_DATA_DIR/resource/GFlags/abbrev.json
    now from the much more appropriate installcache/OKC
"""
def __init__(self):
log.debug("HisMask.__init__")
flags = PhotonMaskFlags()
MaskType.__init__(self, flags, flags.abbrev)
log.debug("HisMask.__init__ DONE")
def test_HisMask(af):
label = "TO BT SD"
mask = af.code(label)
label2 = af.label(mask)
log.info( " %30s -> %d -> %10s " % (label, mask, label2 ))
def test_HisMask_SeqTable(aa, af):
hflags = aa[:,3,3].view(np.uint32)
cu = count_unique_sorted(hflags)
st = SeqTable(cu, af)
    print(st)
def test_HisMask_SeqAna(aa, af):
hflags = aa[:,3,3].view(np.uint32)
sa = SeqAna(hflags, af)
    print(sa.table)
if __name__ == '__main__':
from opticks.ana.main import opticks_main
#ok = opticks_main(src="torch", tag="10", det="PmtInBox")
ok = opticks_main()
af = HisMask()
test_HisMask(af)
try:
ht = A.load_("ht",ok.src,ok.tag,ok.det, pfx=ok.pfx)
log.info("loaded ht %s %s shape %s " % (ht.path, ht.stamp, repr(ht.shape)))
#test_HisMask_SeqTable(ht, af)
test_HisMask_SeqAna(ht, af)
except IOError as err:
log.warning("no ht")
try:
ox = A.load_("ox",ok.src,ok.tag,ok.det, pfx=ok.pfx)
log.info("loaded ox %s %s shape %s " % (ox.path, ox.stamp, repr(ox.shape)))
#test_HisMask_SeqTable(ox, af)
test_HisMask_SeqAna(ox, af)
except IOError as err:
log.warning("no ht")
| en | 0.738883 | #!/usr/bin/env python # # Copyright (c) 2019 Opticks Team. All Rights Reserved. # # This file is part of Opticks # (see https://bitbucket.org/simoncblyth/opticks). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # hismask.py: HisMask ======================== Formerly the abbrev came from $OPTICKS_DATA_DIR/resource/GFlags/abbrev.json now from the muck more appropriate installcache/OKC #ok = opticks_main(src="torch", tag="10", det="PmtInBox") #test_HisMask_SeqTable(ht, af) #test_HisMask_SeqTable(ox, af) | 2.021579 | 2 |
setup.py | energinet-singularity/singupy | 0 | 6630436 | from setuptools import setup, find_packages
setup(
name='singupy',
url='https://github.com/energinet-singularity/singupy',
packages=find_packages(exclude=['tests']),
python_requires='>=3.8',
install_requires=[
'pandas>=1.4.1',
'pandasql>=0.7.3',
'Flask>=2.1.0',
'Flask-RESTful>=0.3.9',
'requests>=2.27.1'
],
version='0.1.4',
license='Apache License 2.0',
description='Library for Singularity',
long_description=open('README.md').read(),
)
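# Install sketch (generic setuptools/pip workflow, not specific to this package):
#   pip install .       # regular install from the repository root
#   pip install -e .    # editable install for development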
djmoney/models/fields.py | fizista/django-money | 0 | 6630437 |
from __future__ import division
from django.db import models
from django.conf import settings
try:
from django.utils.encoding import smart_unicode
except ImportError:
# Python 3
from django.utils.encoding import smart_text as smart_unicode
from django.utils import translation
from django.db.models.signals import class_prepared
from moneyed import Money, Currency, DEFAULT_CURRENCY
from moneyed.localization import _FORMATTER, format_money
from djmoney import forms
from djmoney.forms.widgets import CURRENCY_CHOICES
from django.db.models.expressions import ExpressionNode
from djmoney.utils import get_currency_field_name
from decimal import Decimal, ROUND_DOWN
import inspect
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, in Python 3
basestring = (str, bytes)
__all__ = ('MoneyField', 'NotSupportedLookup')
SUPPORTED_LOOKUPS = ('exact', 'isnull', 'lt', 'gt', 'lte', 'gte')
class NotSupportedLookup(Exception):
def __init__(self, lookup):
self.lookup = lookup
def __str__(self):
return "Lookup '%s' is not supported for MoneyField" % self.lookup
class MoneyPatched(Money):
    # Setting this to True or False takes a higher priority
    # than USE_L10N in the django settings file.
    # The variable "self.use_l10n" has three states: True, False, and None.
use_l10n = None
def __float__(self):
return float(self.amount)
@classmethod
def _patch_to_current_class(cls, money):
"""
        Converts an object of type Money into an object of type MoneyPatched.
"""
return cls(money.amount, money.currency)
def __pos__(self):
return MoneyPatched._patch_to_current_class(
super(MoneyPatched, self).__pos__())
def __neg__(self):
return MoneyPatched._patch_to_current_class(
super(MoneyPatched, self).__neg__())
def __add__(self, other):
return MoneyPatched._patch_to_current_class(
super(MoneyPatched, self).__add__(other))
def __sub__(self, other):
return MoneyPatched._patch_to_current_class(
super(MoneyPatched, self).__sub__(other))
def __mul__(self, other):
return MoneyPatched._patch_to_current_class(
super(MoneyPatched, self).__mul__(other))
def __truediv__(self, other):
if isinstance(other, Money):
return super(MoneyPatched, self).__truediv__(other)
else:
return self._patch_to_current_class(
super(MoneyPatched, self).__truediv__(other))
def __rmod__(self, other):
return MoneyPatched._patch_to_current_class(
super(MoneyPatched, self).__rmod__(other))
def __get_current_locale(self):
locale = translation.get_language()
if _FORMATTER.get_formatting_definition(locale):
return locale
if _FORMATTER.get_formatting_definition('%s_%s' % (locale, locale)):
return '%s_%s' % (locale, locale)
return ''
def __use_l10n(self):
'Return boolean'
# Do not change. The variable "self.use_l10n" has three states:
# True, False, and None.
if self.use_l10n == True:
return True
# Do not change. The variable "self.use_l10n" has three states:
# True, False, and None.
if self.use_l10n == False:
return False
# if self.use_l10n == None >>
return settings.USE_L10N
def __unicode__(self):
if self.__use_l10n():
locale = self.__get_current_locale()
if locale:
return format_money(self, locale=locale)
return format_money(self)
def __str__(self):
if self.__use_l10n():
locale = self.__get_current_locale()
if locale:
return format_money(self, locale=locale)
return format_money(self)
def __repr__(self):
# small fix for tests
return "%s %s" % (self.amount.to_integral_value(ROUND_DOWN),
self.currency)
class MoneyFieldProxy(object):
def __init__(self, field):
self.field = field
self.currency_field_name = get_currency_field_name(self.field.name)
def _money_from_obj(self, obj):
amount = obj.__dict__[self.field.name]
currency = obj.__dict__[self.currency_field_name]
if amount is None:
return None
return MoneyPatched(amount=amount, currency=currency)
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
if isinstance(obj.__dict__[self.field.name], ExpressionNode):
return obj.__dict__[self.field.name]
if not isinstance(obj.__dict__[self.field.name], Money):
obj.__dict__[self.field.name] = self._money_from_obj(obj)
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
if isinstance(value, tuple):
value = Money(amount=value[0], currency=value[1])
if isinstance(value, Money):
obj.__dict__[self.field.name] = value.amount
setattr(obj, self.currency_field_name,
smart_unicode(value.currency))
elif isinstance(value, ExpressionNode):
if isinstance(value.children[1], Money):
value.children[1] = value.children[1].amount
obj.__dict__[self.field.name] = value
else:
if value:
value = str(value)
obj.__dict__[self.field.name] = self.field.to_python(value)
class CurrencyField(models.CharField):
description = "A field which stores currency."
def __init__(self, price_field=None, verbose_name=None, name=None,
default=DEFAULT_CURRENCY, **kwargs):
if isinstance(default, Currency):
default = default.code
kwargs['max_length'] = 3
self.price_field = price_field
self.frozen_by_south = kwargs.pop('frozen_by_south', False)
super(CurrencyField, self).__init__(verbose_name, name, default=default,
**kwargs)
def get_internal_type(self):
return "CharField"
def contribute_to_class(self, cls, name):
if not self.frozen_by_south and not name in [f.name for f in cls._meta.fields]:
super(CurrencyField, self).contribute_to_class(cls, name)
class MoneyField(models.DecimalField):
description = "A field which stores both the currency and amount of money."
def __init__(self, verbose_name=None, name=None,
max_digits=None, decimal_places=None,
default=Money(0.0, DEFAULT_CURRENCY),
default_currency=DEFAULT_CURRENCY,
currency_choices=CURRENCY_CHOICES, **kwargs):
if isinstance(default, basestring):
try:
amount, currency = default.split(" ")
default = Money(float(amount), Currency(code=currency))
except ValueError:
default = Money(float(default), default_currency)
elif isinstance(default, (float, Decimal)):
default = Money(default, default_currency)
if not isinstance(default, Money):
raise Exception(
"default value must be an instance of Money, is: %s" % str(
default))
# Avoid giving the user hard-to-debug errors if they miss required attributes
if max_digits is None:
raise Exception(
"You have to provide a max_digits attribute to Money fields.")
if decimal_places is None:
raise Exception(
"You have to provide a decimal_places attribute to Money fields.")
if not default_currency:
default_currency = default.currency
self.default_currency = default_currency
self.currency_choices = currency_choices
self.frozen_by_south = kwargs.pop('frozen_by_south', False)
super(MoneyField, self).__init__(verbose_name, name, max_digits,
decimal_places, default=default,
**kwargs)
def to_python(self, value):
if isinstance(value, Money):
value = value.amount
if isinstance(value, tuple):
value = value[0]
return super(MoneyField, self).to_python(value)
def get_internal_type(self):
return "DecimalField"
def contribute_to_class(self, cls, name):
# Don't run on abstract classes
if cls._meta.abstract:
return
if not self.frozen_by_south:
c_field_name = get_currency_field_name(name)
# Do not change default=self.default_currency.code, needed
# for south compat.
c_field = CurrencyField(
max_length=3, price_field=self,
default=self.default_currency, editable=False,
choices=self.currency_choices
)
c_field.creation_counter = self.creation_counter
cls.add_to_class(c_field_name, c_field)
super(MoneyField, self).contribute_to_class(cls, name)
setattr(cls, self.name, MoneyFieldProxy(self))
def get_db_prep_save(self, value, connection):
if isinstance(value, Money):
value = value.amount
return value
return super(MoneyField, self).get_db_prep_save(value, connection)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
if not lookup_type in SUPPORTED_LOOKUPS:
raise NotSupportedLookup(lookup_type)
value = self.get_db_prep_save(value, connection)
return super(MoneyField, self).get_db_prep_lookup(lookup_type, value,
connection, prepared)
def get_default(self):
if isinstance(self.default, Money):
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
# We need to return the numerical value if this is called by south
if mod.__name__ == "south.db.generic":
return float(self.default.amount)
return self.default
else:
return super(MoneyField, self).get_default()
def formfield(self, **kwargs):
defaults = {'form_class': forms.MoneyField}
defaults.update(kwargs)
defaults['currency_choices'] = self.currency_choices
return super(MoneyField, self).formfield(**defaults)
def get_south_default(self):
return '%s' % str(self.default)
def get_south_default_currency(self):
return '"%s"' % str(self.default_currency.code)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
## South support
def south_field_triple(self):
"Returns a suitable description of this field for South."
        # Note: This method gets called automatically at schemamigration time.
from south.modelsinspector import introspector
field_class = self.__class__.__module__ + "." + self.__class__.__name__
args, kwargs = introspector(self)
# We need to
# 1. Delete the default, 'cause it's not automatically supported.
kwargs.pop('default')
# 2. add the default currency, because it's not picked up from the inspector automatically.
kwargs['default_currency'] = "'%s'" % self.default_currency
return field_class, args, kwargs
try:
from south.modelsinspector import add_introspection_rules
rules = [
# MoneyField has its own method.
((CurrencyField,),
[], # No positional args
{'default': ('default', {'default': DEFAULT_CURRENCY.code}),
'max_length': ('max_length', {'default': 3})}),
]
    # MoneyField implements the serialization in its south_field_triple method
add_introspection_rules(rules, ["^djmoney\.models\.fields\.CurrencyField"])
except ImportError:
pass
def patch_managers(sender, **kwargs):
"""
Patches models managers
"""
from .managers import money_manager
if any(isinstance(field, MoneyField) for field in sender._meta.fields):
for _id, name, manager in sender._meta.concrete_managers:
setattr(sender, name, money_manager(manager))
class_prepared.connect(patch_managers)
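# Usage sketch (not part of the original module; "Product" and its fields are
# hypothetical names): declaring a MoneyField on a model mirrors the
# MoneyField.__init__ signature defined above.
#   from django.db import models
#   from djmoney.models.fields import MoneyField
#   class Product(models.Model):
#       price = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
#   Product.objects.create(price=Money(9.99, 'USD'))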
data_proc001_type.py | abanger/DCIC2019-Concrete-Pump-Vehicles | 9 | 6630438 |
# @author:abanger
# blog: https://abanger.github.io
# github: https://github.com/abanger/DCIC2019-Concrete-Pump-Vehicles
# # Data integration, not yet processed
import pandas as pd
import os
from tqdm import *
path="./"
data_list = os.listdir(path+'data_train/')
def add_device_type(devicetype,lenbb):
chadd = pd.DataFrame(columns = ['ZVe44', 'ZV573', 'ZV63d', 'ZVfd4', 'ZVa9c', 'ZVa78', 'ZV252'])
    for i in range(lenbb): # insert one row
chadd.loc[i] = [0 for n in range(7)]
chadd[devicetype]=1
#print(devicetype)
return chadd
file_name='data/data_all_n2_type.csv'
df = pd.read_csv(path+'data_train/'+ data_list[0])
lenbb=len(df)
devicetype=df.loc[0]['设备类型']
df = pd.concat( (df,add_device_type(devicetype,lenbb) ),axis=1)
df['sample_file_name'] = data_list[0]
df.to_csv(file_name, index=False,encoding='utf-8')
for i in tqdm(range(1, len(data_list))):
if data_list[i].split('.')[-1] == 'csv':
df = pd.read_csv(path+'data_train/' + data_list[i])
lenbb=len(df)
devicetype=df.loc[0]['设备类型']
df = pd.concat( (df,add_device_type(devicetype,lenbb) ),axis=1)
df['sample_file_name'] = data_list[i]
df.to_csv(file_name, index=False, header=False, mode='a+',encoding='utf-8')
else:
continue
test_data_list = os.listdir(path+'data_test/')
for i in tqdm(range(len(test_data_list))):
if test_data_list[i].split('.')[-1] == 'csv':
df = pd.read_csv(path+'data_test/' + test_data_list[i])
lenbb=len(df)
devicetype=df.loc[0]['设备类型']
df = pd.concat( (df,add_device_type(devicetype,lenbb) ),axis=1)
df['sample_file_name'] = test_data_list[i]
df.to_csv(file_name, index=False, header=False, mode='a+',encoding='utf-8')
else:
        continue
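# Illustration (assumed device type 'ZVe44' and two rows): add_device_type returns a
# frame of seven 0/1 indicator columns with the matching column set to 1, which is
# then concatenated onto the sensor data before writing the combined CSV.
#   add_device_type('ZVe44', 2)
#      ZVe44  ZV573  ZV63d  ZVfd4  ZVa9c  ZVa78  ZV252
#   0      1      0      0      0      0      0      0
#   1      1      0      0      0      0      0      0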
source/meta_compare/image_clasification/sls_image_classification.py | geez0219/ARC | 1 | 6630439 |
import fastestimator as fe
import sls
import torch
import torch.nn as nn
import torch.nn.functional as fn
from fastestimator.op.numpyop.meta import Sometimes
from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop
from fastestimator.op.numpyop.univariate import ChannelTranspose, CoarseDropout, Normalize
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.metric import Accuracy
class DummyUpdate(UpdateOp):
def forward(self, data, state):
pass
class FastCifar(nn.Module):
def __init__(self):
super().__init__()
self.conv0 = nn.Conv2d(3, 64, 3, padding=(1, 1))
self.conv0_bn = nn.BatchNorm2d(64, momentum=0.8)
self.conv1 = nn.Conv2d(64, 128, 3, padding=(1, 1))
self.conv1_bn = nn.BatchNorm2d(128, momentum=0.8)
self.residual1 = Residual(128, 128)
self.conv2 = nn.Conv2d(128, 256, 3, padding=(1, 1))
self.conv2_bn = nn.BatchNorm2d(256, momentum=0.8)
self.residual2 = Residual(256, 256)
self.conv3 = nn.Conv2d(256, 512, 3, padding=(1, 1))
self.conv3_bn = nn.BatchNorm2d(512, momentum=0.8)
self.residual3 = Residual(512, 512)
self.fc1 = nn.Linear(512, 10)
def forward(self, x):
# prep layer
x = self.conv0(x)
x = self.conv0_bn(x)
x = fn.leaky_relu(x, negative_slope=0.1)
# layer 1
x = self.conv1(x)
x = fn.max_pool2d(x, 2)
x = self.conv1_bn(x)
x = fn.leaky_relu(x, negative_slope=0.1)
x = x + self.residual1(x)
# layer 2
x = self.conv2(x)
x = fn.max_pool2d(x, 2)
x = self.conv2_bn(x)
x = fn.leaky_relu(x, negative_slope=0.1)
x = x + self.residual2(x)
# layer 3
x = self.conv3(x)
x = fn.max_pool2d(x, 2)
x = self.conv3_bn(x)
x = fn.leaky_relu(x, negative_slope=0.1)
x = x + self.residual3(x)
# layer 4
# Storing kernel size as a list in case the user needs to export the model to ONNX
# As ONNX doesn't support dynamic kernel size
size_array = [int(s) for s in x.size()[2:]]
x = fn.max_pool2d(x, kernel_size=size_array)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = fn.softmax(x, dim=-1)
return x
class Residual(nn.Module):
def __init__(self, channel_in, channel_out):
super().__init__()
self.conv1 = nn.Conv2d(channel_in, channel_out, 3, padding=(1, 1))
self.conv1_bn = nn.BatchNorm2d(channel_out)
self.conv2 = nn.Conv2d(channel_out, channel_out, 3, padding=(1, 1))
self.conv2_bn = nn.BatchNorm2d(channel_out)
def forward(self, x):
x = self.conv1(x)
x = self.conv1_bn(x)
x = fn.leaky_relu(x, negative_slope=0.1)
x = self.conv2(x)
x = self.conv2_bn(x)
x = fn.leaky_relu(x, negative_slope=0.1)
return x
class SGDLinesSearch(fe.op.tensorop.TensorOp):
def __init__(self, model, opt, loss_op, inputs, outputs, mode="train"):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.model = model
self.opt = opt
self.loss_op = loss_op
def forward(self, data, state):
x, y = data
closure = lambda: self.loss_op.forward((self.model(x), y), state=state)
self.opt.zero_grad()
loss = self.opt.step(closure=closure)
return loss
class PrintLR(fe.trace.Trace):
def __init__(self, opt):
super().__init__(mode="train")
self.opt = opt
def on_batch_end(self, data):
if self.system.global_step % self.system.log_steps == 0 or self.system.global_step == 1:
data.write_with_log("model_lr", float(self.opt.state['step_size']))
def get_estimator(epochs=30, batch_size=128):
# step 1
train_data, eval_data = fe.dataset.data.cifar10.load_data()
pipeline = fe.Pipeline(
train_data=train_data,
eval_data=eval_data,
batch_size=batch_size,
ops=[
Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
ChannelTranspose(inputs="x", outputs="x")
])
# step 2
model = fe.build(model_fn=FastCifar, optimizer_fn="sgd")
opt = sls.Sls(model.parameters())
network = fe.Network(ops=[
ModelOp(model=model, inputs="x", outputs="y_pred"),
SGDLinesSearch(model=model,
opt=opt,
loss_op=CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
inputs=("x", "y"),
outputs="ce"),
CrossEntropy(inputs=("y_pred", "y"), outputs="ce", mode="eval"),
        DummyUpdate(model=model, loss_name="ce")
])
# step 3
traces = [Accuracy(true_key="y", pred_key="y_pred"), PrintLR(opt=opt)]
estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)
return estimator
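# Entry-point sketch (assumed usage following the common FastEstimator apphub pattern):
# build the estimator with the defaults above and launch training.
if __name__ == "__main__":
    est = get_estimator()
    est.fit()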
manage.py | Gillingham/evething | 33 | 6630440 | #!/usr/bin/env python
import os
import sys
# Enforce Django 1.5
from django import get_version
if get_version() < '1.5':
print
print 'ERROR: EVEthing requires Django version 1.5 or above!'
print
sys.exit(1)
# try using cdecimal for faster Decimal type
try:
import cdecimal
except ImportError:
pass
else:
sys.modules["decimal"] = cdecimal
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evething.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
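# Typical invocations once settings are configured (examples only):
#   python manage.py runserver
#   python manage.py help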
interpret_tools.py | AlexisNaN/BAS-PRO_plotting | 0 | 6630441 |
import interpret_cdf
import colorsys
import sys
import numpy as np
from math import sqrt, asin, pi
mass0_proton = 1.6726219e-27
MeV2J = 1.60218e-13
c_ = 299792458
def userselectkey(filedict, allowmulti=False):
#user selects keys from a dictionary of items supplied as argument
#input is sanitised and returned as a list of keys
for key in filedict.keys():
print(key, '...', filedict[key])
filedict_selectkeys = [] #save the selection as keys
#ask the user to selet results files to plot:
decided = False
more = False
while not decided:
choice = input("> ")
#sanitise the input and re-ask if necessary:
try:
if choice[-1] == ",":
more = True * allowmulti
choice = choice[:-1]
else:
more = False
choice = int(choice)
if choice in filedict.keys():
if choice in filedict_selectkeys:
print (" already selected")
else:
filedict_selectkeys.append(choice)
if not more:
decided = True
else:
print(" out of range")
except:
print(" invalid")
pass
return filedict_selectkeys
def userinputfloat(allowblank=False, allowmulti=False):
floatselectkeys = [] #save the selection as keys
decided = False
more = False
while not decided:
choice = input("> ")
if choice == "":
if allowblank:
decided = True
floatselectkeys.append(False)
else:
print(" a value must be specified")
else:
#sanitise the input and re-ask if necessary:
try:
if choice[-1] == ",":
more = True * allowmulti
choice = choice[:-1]
else:
more = False
choice = float(choice)
if choice in floatselectkeys:
print (" already selected")
else:
floatselectkeys.append(choice)
if not more:
decided = True
except:
print(" invalid")
pass
return floatselectkeys
def get_N_HexCol(N):
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
return RGB_tuples
def getlastline(fname):
with open(fname, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
return last_line.strip("\n")
# def bytes2float(bytestring):
# return float(((bytestring).decode("utf-8")).strip(' ').strip('\n'))
# def bytes2str(bytestring):
# return str(((bytestring).decode("utf-8")).strip(' ').strip('\n'))
# def eqL2B(Larray):
# B0 = 3.12e-5
# T2G = 1.0e4
# return T2G*B0*(np.power(np.reciprocal(Larray),3))
def getcol(n):
plotnshuffle = 0 #shift the linestyle/colour scheme/label of the set of lines
n = plotnshuffle + n
#ncols = [x for x in get_N_HexCol(len(filedict_plotkey))]
ncols = ['blue',"#3cb44b",'deepskyblue']
return ncols[n%len(ncols)]
def get_axis_file(fname):
with open(fname) as fi:
ax_ = fi.readlines()
for idxL in range(0, len(ax_)):
ax_[idxL] = float(ax_[idxL].strip('/n'))
ax_ = np.array(ax_)
return ax_
def get_sol_file(fname):
lines = []
with open(fname) as fo:
lines = fo.readlines()
sol_f = []
for idx in range(0, len(lines)):
sol_f.append([float(x) for x in lines[idx].strip('\n').split()])
sol_f = np.array(sol_f)
return sol_f
def get_gamma(ke_MeV):
#calculate gamma of a proton given its kinetic energy in MeV
ke_J = ke_MeV * MeV2J
erest_J = mass0_proton*c_*c_
gamma = 1 + ke_J/(erest_J)
return gamma
def get_pr(ke_MeV):
# !calculate relativistic momentum in SI units (kg m s-1) of a proton given its kinetic energy in MeV
erest_J = mass0_proton*c_*c_
gamma = get_gamma(ke_MeV)
mr = mass0_proton * gamma
etot_J = mr*c_*c_
p_ = np.sqrt((etot_J**2) - (erest_J**2))/c_ #relativistic momentum kg m/s
return p_
def f2j(energy, psd):
# !
# ! input units of energy: MeV
# ! input units of phase space density: m-6 s3 kg-3
# !
# !
#change units to m-6 s3:
temp = psd / 1e18
# units: m-6 s3
temp = temp / (mass0_proton**3.)
# units are m-6 s3 kg-3
#get momentum squared:
p2 = (get_pr(energy) **2.)
# units: kg2 m2 s-2, or: ** kg J **
flux = temp * p2
# units: m-2 s-1 str-1 J-1
flux = flux / 1e4
# units: cm-2 s-1 str-1 J-1
flux = flux * MeV2J
# units: cm-2 s-1 str-1 MeV-1
return flux
def j2f(energy, flux):
# !
# ! input units of energy: MeV
# ! input units of flux: cm-2 s-1 str-1 MeV-1
# !
# !
#get momentum squared:
p2 = (get_pr(energy) **2.)
# units: kg2 m2 s-2, or: ** kg J **
flux = flux / MeV2J
# units: cm-2 s-1 str-1 J-1
flux = flux * 1e4
# units: m-2 s-1 str-1 J-1
temp = flux / p2
temp = temp * (mass0_proton**3.)
psd = temp * 1e18
# units: m-6 s3 kg-3
return psd
def get_colour_from_time(time, ax_t, cmap):
if (len(ax_t) > 1):
frac = (time-ax_t[0])/(ax_t[-1]-ax_t[0])
else:
frac=0
return cmap(frac)
def f_sinn(x, A, b, c, n):
# python does not like a negative number to a decimal power
    # p0 should be something like [10, 0, 25, 4] in practice
d2r = np.pi / 180.
sinn = np.abs(np.sin((x+b)*d2r))
return A * np.power(sinn,n) + c
def f_sinn_simple(x, A, n):
# python does not like a negative number to a decimal power
    # p0 should be something like [10, 0, 25, 4] in practice
d2r = np.pi / 180.
sinn = np.abs(np.sin((x)*d2r))
return A * np.power(sinn,n)
def get_lc(Lb): #centred dipole loss cone approximation for 2015
RE = 6.3712e6
atm_height_std = 100000
B0 = 2.986731323946967e-05
ra = (RE + atm_height_std)/RE #~Earth's surface + atm_height_dipolelc m
if ra >= Lb:
return 90
else:
Ba = (B0/(ra**3)) * (4 - 3*ra/Lb)**(0.5)
dipole_lc = asin(sqrt((B0 / Lb**3)/Ba)) * 180 / pi
return dipole_lc
def getpad_(cdf, L_extract, en, dynamic, time_plot):
#
#
# WARNING: when interpolating between L output by the model, we can't determine where the loss cone is!
# so the PAD will be returned with a point at (0,0) but then shoot up to the first point outside the lc
#
#
#returns pitch angle distribution alpha and corresponding f, straight from the solution grid
ax_t = cdf[interpret_cdf.lab_axt]
ax_mu = cdf[interpret_cdf.lab_axmu]
ax_K = cdf[interpret_cdf.lab_axK]
ax_L = cdf[interpret_cdf.lab_axL]
map_alpha = cdf[interpret_cdf.lab_map]
sol_f1d_allK = []
sol_alpha_allK = []
for idx_K in range(len(ax_K)):
K_now = ax_K[idx_K]
sol_en = cdf[interpret_cdf.lab_en][0, :, idx_K, :]
sol_f = cdf[interpret_cdf.lab_f][:, :, idx_K, :]
#find the minimum L that is outside the loss cone at the current K:
idxL_outsidelc = np.argwhere(map_alpha[:,idx_K]>0)[0][0]
#check if L is out of range for interpolation at this K:
if L_extract < np.min(ax_L[idxL_outsidelc:]) or L_extract > np.max(ax_L[idxL_outsidelc:]):
continue
#get alpha from the map file, but ignore fill values:
sol_alpha = np.interp(L_extract, ax_L[idxL_outsidelc:], map_alpha[:,idx_K][idxL_outsidelc:])
#check the energy array too: we may have defined pitch angle at every K for nL (model artifact)
if np.sum(sol_en[:,idxL_outsidelc:len(ax_L)]<0.) > 0:
#if there are any elements below 0 in sol_en:
continue
#show a warning if energy is out of range for interpolation:
for idxL in range(idxL_outsidelc, len(ax_L)):
if (en > sol_en[-1, idxL] or en < sol_en[0, idxL]):
print("Error: energy {:.2f}MeV is out of bounds at alpha={:.2f} (iK={})".format(en,sol_alpha,idx_K+1))
sys.exit(1)
if dynamic: #interpolate to the current t we need:
#get idx_t_0 and idx_t_1 surrounding the time we want to plot:
idx_t_0 = -1
idx_t_1 = idx_t_0
if time_plot < ax_t[0] or time_plot > ax_t[-1]:
print("","Error: time_plot is out of range on K idx", idx_K)
sys.exit(1)
for idx_t, time_sol in enumerate(ax_t):
if time_plot >= time_sol:
idx_t_0 = idx_t
if time_plot == time_sol:
idx_t_1 = idx_t_0 #we 'interpolate' across 1 grid
else:
idx_t_1 = idx_t + 1
else:
break
#idx_extent_f_required = [idx_t_0*len(ax_mu), (1+idx_t_1)*len(ax_mu)] #from the first row of idx_t_0 to last of idx_t_1
#idx_ofst = idx_extent_f_required[0]
#sol_f = unpack_fileset_fonly_part(fileset, idx_extent_f_required)
#get f at every L at the energy under investigation:
# t0
sol_f1d_t_0 = []
for idxL in range(idxL_outsidelc, len(ax_L)):
#sol_f1d_t_0.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t_0*len(ax_mu)-idx_ofst:(1+idx_t_0)*len(ax_mu)-idx_ofst, idxL])))
sol_f1d_t_0.append(np.interp(en, sol_en[:, idxL], sol_f[idx_t_0, :, idxL]))
# get f at the L under investigation:
sol_f1d_t_0 = np.interp(L_extract, ax_L[idxL_outsidelc:], sol_f1d_t_0)
if not (ax_t[idx_t_0] == ax_t[idx_t_1] ): #skip interpolating from the second surrounding time
# t1
sol_f1d_t_1 = []
for idxL in range(idxL_outsidelc, len(ax_L)):
#sol_f1d_t_1.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t_1*len(ax_mu)-idx_ofst:(1+idx_t_1)*len(ax_mu)-idx_ofst, idxL])))
sol_f1d_t_1.append(np.interp(en, sol_en[:, idxL], sol_f[idx_t_1, :, idxL]))
# get f at the L under investigation:
sol_f1d_t_1 = np.interp(L_extract, ax_L[idxL_outsidelc:], sol_f1d_t_1)
# interpolate to t:
ax_t_surround = [ax_t[idx_t_0], ax_t[idx_t_1]]
f_surround = [sol_f1d_t_0, sol_f1d_t_1]
sol_f1d = np.interp(time_plot, ax_t_surround, f_surround)
else:
sol_f1d = sol_f1d_t_0
else:
idx_t = 0
#get f at every L at the energy under investigation:
sol_f1d = []
for idxL in range(idxL_outsidelc, len(ax_L)):
#sol_f1d.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t*len(ax_mu):(1+idx_t)*len(ax_mu), idxL])))
sol_f1d.append(np.interp(en, sol_en[:, idxL], sol_f[idx_t, :, idxL]))
#get f at the L under investigation:
sol_f1d = np.interp(L_extract, ax_L[idxL_outsidelc:], sol_f1d)
#print (sol_alpha, sol_f1d_t_0, sol_f1d_t_1)
sol_f1d_allK.append(sol_f1d)
sol_alpha_allK.append(sol_alpha)
#add zero to the beginning of the array and reverse it to have K=0 last
sol_f = np.array([0.]+sol_f1d_allK[::-1])
sol_alpha = np.array([0.]+sol_alpha_allK[::-1])
return sol_alpha, sol_f
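# Self-check sketch (illustrative values only; runs only when executed directly):
# f2j converts phase space density to differential flux and j2f inverts it, so a
# round trip at fixed energy should recover the input to rounding error.
if __name__ == "__main__":
    en_MeV = 10.0
    psd_in = 1.0e6                       # m-6 s3 kg-3, arbitrary test value
    flux = f2j(en_MeV, psd_in)           # cm-2 s-1 str-1 MeV-1
    psd_back = j2f(en_MeV, flux)
    print(en_MeV, psd_in, flux, psd_back)
    print("dipole loss cone at L=4:", get_lc(4.0), "deg")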
| import interpret_cdf
import colorsys
import sys
import numpy as np
from math import sqrt, asin, pi
mass0_proton = 1.6726219e-27
MeV2J = 1.60218e-13
c_ = 299792458
def userselectkey(filedict, allowmulti=False):
#user selects keys from a dictionary of items supplied as argument
#input is sanitised and returned as a list of keys
for key in filedict.keys():
print(key, '...', filedict[key])
filedict_selectkeys = [] #save the selection as keys
#ask the user to selet results files to plot:
decided = False
more = False
while not decided:
choice = input("> ")
#sanitise the input and re-ask if necessary:
try:
if choice[-1] == ",":
more = True * allowmulti
choice = choice[:-1]
else:
more = False
choice = int(choice)
if choice in filedict.keys():
if choice in filedict_selectkeys:
print (" already selected")
else:
filedict_selectkeys.append(choice)
if not more:
decided = True
else:
print(" out of range")
except:
print(" invalid")
pass
return filedict_selectkeys
def userinputfloat(allowblank=False, allowmulti=False):
floatselectkeys = [] #save the selection as keys
decided = False
more = False
while not decided:
choice = input("> ")
if choice == "":
if allowblank:
decided = True
floatselectkeys.append(False)
else:
print(" a value must be specified")
else:
#sanitise the input and re-ask if necessary:
try:
if choice[-1] == ",":
more = True * allowmulti
choice = choice[:-1]
else:
more = False
choice = float(choice)
if choice in floatselectkeys:
print (" already selected")
else:
floatselectkeys.append(choice)
if not more:
decided = True
except:
print(" invalid")
pass
return floatselectkeys
def get_N_HexCol(N):
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
return RGB_tuples
def getlastline(fname):
with open(fname, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
return last_line.strip("\n")
# def bytes2float(bytestring):
# return float(((bytestring).decode("utf-8")).strip(' ').strip('\n'))
# def bytes2str(bytestring):
# return str(((bytestring).decode("utf-8")).strip(' ').strip('\n'))
# def eqL2B(Larray):
# B0 = 3.12e-5
# T2G = 1.0e4
# return T2G*B0*(np.power(np.reciprocal(Larray),3))
def getcol(n):
plotnshuffle = 0 #shift the linestyle/colour scheme/label of the set of lines
n = plotnshuffle + n
#ncols = [x for x in get_N_HexCol(len(filedict_plotkey))]
ncols = ['blue',"#3cb44b",'deepskyblue']
return ncols[n%len(ncols)]
def get_axis_file(fname):
with open(fname) as fi:
ax_ = fi.readlines()
for idxL in range(0, len(ax_)):
ax_[idxL] = float(ax_[idxL].strip('/n'))
ax_ = np.array(ax_)
return ax_
def get_sol_file(fname):
lines = []
with open(fname) as fo:
lines = fo.readlines()
sol_f = []
for idx in range(0, len(lines)):
sol_f.append([float(x) for x in lines[idx].strip('\n').split()])
sol_f = np.array(sol_f)
return sol_f
def get_gamma(ke_MeV):
#calculate gamma of a proton given its kinetic energy in MeV
ke_J = ke_MeV * MeV2J
erest_J = mass0_proton*c_*c_
gamma = 1 + ke_J/(erest_J)
return gamma
def get_pr(ke_MeV):
# !calculate relativistic momentum in SI units (kg m s-1) of a proton given its kinetic energy in MeV
erest_J = mass0_proton*c_*c_
gamma = get_gamma(ke_MeV)
mr = mass0_proton * gamma
etot_J = mr*c_*c_
p_ = np.sqrt((etot_J**2) - (erest_J**2))/c_ #relativistic momentum kg m/s
return p_
def f2j(energy, psd):
# !
# ! input units of energy: MeV
# ! input units of phase space density: m-6 s3 kg-3
# !
# !
#change units to m-6 s3:
temp = psd / 1e18
# units: m-6 s3
temp = temp / (mass0_proton**3.)
# units are m-6 s3 kg-3
#get momentum squared:
p2 = (get_pr(energy) **2.)
# units: kg2 m2 s-2, or: ** kg J **
flux = temp * p2
# units: m-2 s-1 str-1 J-1
flux = flux / 1e4
# units: cm-2 s-1 str-1 J-1
flux = flux * MeV2J
# units: cm-2 s-1 str-1 MeV-1
return flux
def j2f(energy, flux):
# !
# ! input units of energy: MeV
# ! input units of flux: cm-2 s-1 str-1 MeV-1
# !
# !
#get momentum squared:
p2 = (get_pr(energy) **2.)
# units: kg2 m2 s-2, or: ** kg J **
flux = flux / MeV2J
# units: cm-2 s-1 str-1 J-1
flux = flux * 1e4
# units: m-2 s-1 str-1 J-1
temp = flux / p2
temp = temp * (mass0_proton**3.)
psd = temp * 1e18
# units: m-6 s3 kg-3
return psd
def get_colour_from_time(time, ax_t, cmap):
if (len(ax_t) > 1):
frac = (time-ax_t[0])/(ax_t[-1]-ax_t[0])
else:
frac=0
return cmap(frac)
def f_sinn(x, A, b, c, n):
# python does not like a negative number to a decimal power
# p0 should be something like [10, 0, 25, 4] in practise
d2r = np.pi / 180.
sinn = np.abs(np.sin((x+b)*d2r))
return A * np.power(sinn,n) + c
def f_sinn_simple(x, A, n):
# python does not like a negative number to a decimal power
# p0 should be something like [10, 0, 25, 4] in practise
d2r = np.pi / 180.
sinn = np.abs(np.sin((x)*d2r))
return A * np.power(sinn,n)
def get_lc(Lb): #centred dipole loss cone approximation for 2015
RE = 6.3712e6
atm_height_std = 100000
B0 = 2.986731323946967e-05
ra = (RE + atm_height_std)/RE #~Earth's surface + atm_height_dipolelc m
if ra >= Lb:
return 90
else:
Ba = (B0/(ra**3)) * (4 - 3*ra/Lb)**(0.5)
dipole_lc = asin(sqrt((B0 / Lb**3)/Ba)) * 180 / pi
return dipole_lc
def getpad_(cdf, L_extract, en, dynamic, time_plot):
#
#
# WARNING: when interpolating between L output by the model, we can't determine where the loss cone is!
# so the PAD will be returned with a point at (0,0) but then shoot up to the first point outside the lc
#
#
#returns pitch angle distribution alpha and corresponding f, straight from the solution grid
ax_t = cdf[interpret_cdf.lab_axt]
ax_mu = cdf[interpret_cdf.lab_axmu]
ax_K = cdf[interpret_cdf.lab_axK]
ax_L = cdf[interpret_cdf.lab_axL]
map_alpha = cdf[interpret_cdf.lab_map]
sol_f1d_allK = []
sol_alpha_allK = []
for idx_K in range(len(ax_K)):
K_now = ax_K[idx_K]
sol_en = cdf[interpret_cdf.lab_en][0, :, idx_K, :]
sol_f = cdf[interpret_cdf.lab_f][:, :, idx_K, :]
#find the minimum L that is outside the loss cone at the current K:
idxL_outsidelc = np.argwhere(map_alpha[:,idx_K]>0)[0][0]
#check if L is out of range for interpolation at this K:
if L_extract < np.min(ax_L[idxL_outsidelc:]) or L_extract > np.max(ax_L[idxL_outsidelc:]):
continue
#get alpha from the map file, but ignore fill values:
sol_alpha = np.interp(L_extract, ax_L[idxL_outsidelc:], map_alpha[:,idx_K][idxL_outsidelc:])
#check the energy array too: we may have defined pitch angle at every K for nL (model artifact)
if np.sum(sol_en[:,idxL_outsidelc:len(ax_L)]<0.) > 0:
#if there are any elements below 0 in sol_en:
continue
#show a warning if energy is out of range for interpolation:
for idxL in range(idxL_outsidelc, len(ax_L)):
if (en > sol_en[-1, idxL] or en < sol_en[0, idxL]):
print("Error: energy {:.2f}MeV is out of bounds at alpha={:.2f} (iK={})".format(en,sol_alpha,idx_K+1))
sys.exit(1)
if dynamic: #interpolate to the current t we need:
#get idx_t_0 and idx_t_1 surrounding the time we want to plot:
idx_t_0 = -1
idx_t_1 = idx_t_0
if time_plot < ax_t[0] or time_plot > ax_t[-1]:
print("","Error: time_plot is out of range on K idx", idx_K)
sys.exit(1)
for idx_t, time_sol in enumerate(ax_t):
if time_plot >= time_sol:
idx_t_0 = idx_t
if time_plot == time_sol:
idx_t_1 = idx_t_0 #we 'interpolate' across 1 grid
else:
idx_t_1 = idx_t + 1
else:
break
#idx_extent_f_required = [idx_t_0*len(ax_mu), (1+idx_t_1)*len(ax_mu)] #from the first row of idx_t_0 to last of idx_t_1
#idx_ofst = idx_extent_f_required[0]
#sol_f = unpack_fileset_fonly_part(fileset, idx_extent_f_required)
#get f at every L at the energy under investigation:
# t0
sol_f1d_t_0 = []
for idxL in range(idxL_outsidelc, len(ax_L)):
#sol_f1d_t_0.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t_0*len(ax_mu)-idx_ofst:(1+idx_t_0)*len(ax_mu)-idx_ofst, idxL])))
sol_f1d_t_0.append(np.interp(en, sol_en[:, idxL], sol_f[idx_t_0, :, idxL]))
# get f at the L under investigation:
sol_f1d_t_0 = np.interp(L_extract, ax_L[idxL_outsidelc:], sol_f1d_t_0)
if not (ax_t[idx_t_0] == ax_t[idx_t_1] ): #skip interpolating from the second surrounding time
# t1
sol_f1d_t_1 = []
for idxL in range(idxL_outsidelc, len(ax_L)):
#sol_f1d_t_1.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t_1*len(ax_mu)-idx_ofst:(1+idx_t_1)*len(ax_mu)-idx_ofst, idxL])))
sol_f1d_t_1.append(np.interp(en, sol_en[:, idxL], sol_f[idx_t_1, :, idxL]))
# get f at the L under investigation:
sol_f1d_t_1 = np.interp(L_extract, ax_L[idxL_outsidelc:], sol_f1d_t_1)
# interpolate to t:
ax_t_surround = [ax_t[idx_t_0], ax_t[idx_t_1]]
f_surround = [sol_f1d_t_0, sol_f1d_t_1]
sol_f1d = np.interp(time_plot, ax_t_surround, f_surround)
else:
sol_f1d = sol_f1d_t_0
else:
idx_t = 0
#get f at every L at the energy under investigation:
sol_f1d = []
for idxL in range(idxL_outsidelc, len(ax_L)):
#sol_f1d.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t*len(ax_mu):(1+idx_t)*len(ax_mu), idxL])))
sol_f1d.append(np.interp(en, sol_en[:, idxL], sol_f[idx_t, :, idxL]))
#get f at the L under investigation:
sol_f1d = np.interp(L_extract, ax_L[idxL_outsidelc:], sol_f1d)
#print (sol_alpha, sol_f1d_t_0, sol_f1d_t_1)
sol_f1d_allK.append(sol_f1d)
sol_alpha_allK.append(sol_alpha)
#add zero to the beginning of the array and reverse it to have K=0 last
sol_f = np.array([0.]+sol_f1d_allK[::-1])
sol_alpha = np.array([0.]+sol_alpha_allK[::-1])
return sol_alpha, sol_f | en | 0.645927 | #user selects keys from a dictionary of items supplied as argument #input is sanitised and returned as a list of keys #save the selection as keys #ask the user to selet results files to plot: #sanitise the input and re-ask if necessary: #save the selection as keys #sanitise the input and re-ask if necessary: # def bytes2float(bytestring): # return float(((bytestring).decode("utf-8")).strip(' ').strip('\n')) # def bytes2str(bytestring): # return str(((bytestring).decode("utf-8")).strip(' ').strip('\n')) # def eqL2B(Larray): # B0 = 3.12e-5 # T2G = 1.0e4 # return T2G*B0*(np.power(np.reciprocal(Larray),3)) #shift the linestyle/colour scheme/label of the set of lines #ncols = [x for x in get_N_HexCol(len(filedict_plotkey))] #calculate gamma of a proton given its kinetic energy in MeV # !calculate relativistic momentum in SI units (kg m s-1) of a proton given its kinetic energy in MeV #relativistic momentum kg m/s # ! # ! input units of energy: MeV # ! input units of phase space density: m-6 s3 kg-3 # ! # ! #change units to m-6 s3: # units: m-6 s3 # units are m-6 s3 kg-3 #get momentum squared: # units: kg2 m2 s-2, or: ** kg J ** # units: m-2 s-1 str-1 J-1 # units: cm-2 s-1 str-1 J-1 # units: cm-2 s-1 str-1 MeV-1 # ! # ! input units of energy: MeV # ! input units of flux: cm-2 s-1 str-1 MeV-1 # ! # ! #get momentum squared: # units: kg2 m2 s-2, or: ** kg J ** # units: cm-2 s-1 str-1 J-1 # units: m-2 s-1 str-1 J-1 # units: m-6 s3 kg-3 # python does not like a negative number to a decimal power # p0 should be something like [10, 0, 25, 4] in practise # python does not like a negative number to a decimal power # p0 should be something like [10, 0, 25, 4] in practise #centred dipole loss cone approximation for 2015 #~Earth's surface + atm_height_dipolelc m # # # WARNING: when interpolating between L output by the model, we can't determine where the loss cone is! 
# so the PAD will be returned with a point at (0,0) but then shoot up to the first point outside the lc # # #returns pitch angle distribution alpha and corresponding f, straight from the solution grid #find the minimum L that is outside the loss cone at the current K: #check if L is out of range for interpolation at this K: #get alpha from the map file, but ignore fill values: #check the energy array too: we may have defined pitch angle at every K for nL (model artifact) #if there are any elements below 0 in sol_en: #show a warning if energy is out of range for interpolation: #interpolate to the current t we need: #get idx_t_0 and idx_t_1 surrounding the time we want to plot: #we 'interpolate' across 1 grid #idx_extent_f_required = [idx_t_0*len(ax_mu), (1+idx_t_1)*len(ax_mu)] #from the first row of idx_t_0 to last of idx_t_1 #idx_ofst = idx_extent_f_required[0] #sol_f = unpack_fileset_fonly_part(fileset, idx_extent_f_required) #get f at every L at the energy under investigation: # t0 #sol_f1d_t_0.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t_0*len(ax_mu)-idx_ofst:(1+idx_t_0)*len(ax_mu)-idx_ofst, idxL]))) # get f at the L under investigation: #skip interpolating from the second surrounding time # t1 #sol_f1d_t_1.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t_1*len(ax_mu)-idx_ofst:(1+idx_t_1)*len(ax_mu)-idx_ofst, idxL]))) # get f at the L under investigation: # interpolate to t: #get f at every L at the energy under investigation: #sol_f1d.append(float(np.interp(en, sol_en[:, idxL], sol_f[idx_t*len(ax_mu):(1+idx_t)*len(ax_mu), idxL]))) #get f at the L under investigation: #print (sol_alpha, sol_f1d_t_0, sol_f1d_t_1) #add zero to the beginning of the array and reverse it to have K=0 last | 3.125112 | 3 |
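A minimal, self-contained sketch of the nested np.interp pattern used in the extraction code above (interpolate in energy at each L, then across L, then between the two surrounding time steps); the array names, shapes and values are synthetic stand-ins, not the model's actual solution grid.

import numpy as np

# Synthetic stand-ins (illustrative shapes only): sol_f[time, mu, L], sol_en[mu, L]
ax_t = np.array([0.0, 1.0, 2.0])                                  # time axis
ax_L = np.array([3.0, 4.0, 5.0, 6.0])                             # L-shell axis
sol_en = np.linspace(0.1, 10.0, 20)[:, None] * np.ones((20, 4))   # energy grid, MeV
sol_f = np.random.rand(3, 20, 4)                                  # phase space density values

en, L_extract, time_plot = 2.5, 4.3, 0.6                          # point to extract

def f_at_time(idx_t):
    # interpolate in energy at every L, then across L
    f_vs_L = [np.interp(en, sol_en[:, iL], sol_f[idx_t, :, iL]) for iL in range(len(ax_L))]
    return np.interp(L_extract, ax_L, f_vs_L)

idx_t0 = np.searchsorted(ax_t, time_plot) - 1                     # time step just before time_plot
idx_t1 = min(idx_t0 + 1, len(ax_t) - 1)                           # and the one just after
f_point = np.interp(time_plot, [ax_t[idx_t0], ax_t[idx_t1]],
                    [f_at_time(idx_t0), f_at_time(idx_t1)])
print(f_point)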
src/data/get_processed_data.py | neerajmachinelearning/DataScienceEnd2End_EmployeeAttrition | 0 | 6630442 | <gh_stars>0
import numpy as np
import pandas as pd
import os
def read_data():
raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
working_data_path = os.path.join(raw_data_path, 'WA_Fn-UseC_-HR-Employee-Attrition.csv')
work_df = pd.read_csv(working_data_path, index_col='EmployeeNumber')
return work_df
def process_data(df):
return (df
.drop(['EmployeeCount', 'MonthlyIncome', 'Over18', 'StandardHours'], axis=1)
.pipe(categorical_binary)
.pipe(categorical_onehot)
.pipe(reorder_column)
)
def reorder_column(work_df):
columns = [column for column in work_df.columns if column != 'Attrition']
columns = ['Attrition'] + columns
work_df = work_df[columns]
return work_df
def categorical_onehot(work_df):
categorical_col = []
for column in work_df.columns:
if work_df[column].dtype == object and len(work_df[column].unique())<=50:
categorical_col.append(column)
work_df = pd.get_dummies(work_df, columns = categorical_col)
return work_df
def categorical_binary(work_df):
work_df['Attrition'] = work_df['Attrition'].str.lower().replace({'yes': 1, 'no':0})
work_df['Gender'] = work_df['Gender'].str.lower().replace({'male':1,'female':0})
work_df['OverTime'] = work_df['OverTime'].str.lower().replace({'yes':1, 'no':0})
return work_df
def write_data(df):
processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
write_data_path = os.path.join(processed_data_path, 'processed_Data_Employee-Attrition.csv')
df.to_csv(write_data_path)
if __name__ == '__main__':
df = read_data()
df = process_data(df)
write_data(df)
| import numpy as np
import pandas as pd
import os
def read_data():
raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
working_data_path = os.path.join(raw_data_path, 'WA_Fn-UseC_-HR-Employee-Attrition.csv')
work_df = pd.read_csv(working_data_path, index_col='EmployeeNumber')
return work_df
def process_data(df):
return (df
.drop(['EmployeeCount', 'MonthlyIncome', 'Over18', 'StandardHours'], axis=1)
.pipe(categorical_binary)
.pipe(categorical_onehot)
.pipe(reorder_column)
)
def reorder_column(work_df):
columns = [column for column in work_df.columns if column != 'Attrition']
columns = ['Attrition'] + columns
work_df = work_df[columns]
return work_df
def categorical_onehot(work_df):
categorical_col = []
for column in work_df.columns:
if work_df[column].dtype == object and len(work_df[column].unique())<=50:
categorical_col.append(column)
work_df = pd.get_dummies(work_df, columns = categorical_col)
return work_df
def categorical_binary(work_df):
work_df['Attrition'] = work_df['Attrition'].str.lower().replace({'yes': 1, 'no':0})
work_df['Gender'] = work_df['Gender'].str.lower().replace({'male':1,'female':0})
work_df['OverTime'] = work_df['OverTime'].str.lower().replace({'yes':1, 'no':0})
return work_df
def write_data(df):
processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
write_data_path = os.path.join(processed_data_path, 'processed_Data_Employee-Attrition.csv')
df.to_csv(write_data_path)
if __name__ == '__main__':
df = read_data()
df = process_data(df)
write_data(df) | none | 1 | 3.016692 | 3 |
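A small standalone illustration of the two encoding steps above (binary mapping, then pd.get_dummies one-hot encoding) on a made-up frame rather than the real attrition CSV.

import pandas as pd

# made-up sample rows (the real input is WA_Fn-UseC_-HR-Employee-Attrition.csv)
df = pd.DataFrame({
    'Attrition': ['Yes', 'No', 'Yes'],
    'OverTime': ['No', 'Yes', 'No'],
    'Department': ['Sales', 'R&D', 'Sales'],
})

# binary mapping, as in categorical_binary()
df['Attrition'] = df['Attrition'].str.lower().replace({'yes': 1, 'no': 0})
df['OverTime'] = df['OverTime'].str.lower().replace({'yes': 1, 'no': 0})

# one-hot encode the remaining object column, as in categorical_onehot()
df = pd.get_dummies(df, columns=['Department'])
print(df.columns.tolist())   # ['Attrition', 'OverTime', 'Department_R&D', 'Department_Sales']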
|
src/r2pyapi/utils.py | kohnakagawa/r2pyapi | 0 | 6630443 | <filename>src/r2pyapi/utils.py
from typing import List
def u32_to_u8_array(a: int) -> List[int]:
return [
a & 0xFF,
(a >> 8) & 0xFF,
(a >> 16) & 0xFF,
(a >> 24) & 0xFF,
]
def hex_as_string(a: List[int]) -> str:
return "".join(f"{i:02x}" for i in a)
def hex_as_sring_prefix(a: List[int]) -> str:
return "".join(fr"\x{i:02x}" for i in a)
| <filename>src/r2pyapi/utils.py
from typing import List
def u32_to_u8_array(a: int) -> List[int]:
return [
a & 0xFF,
(a >> 8) & 0xFF,
(a >> 16) & 0xFF,
(a >> 24) & 0xFF,
]
def hex_as_string(a: List[int]) -> str:
return "".join(f"{i:02x}" for i in a)
def hex_as_sring_prefix(a: List[int]) -> str:
return "".join(fr"\x{i:02x}" for i in a)
| none | 1 | 3.058257 | 3 |
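A short usage sketch for the helpers above: a 32-bit word is split little-endian into bytes and rendered as hex (this assumes the module is importable as r2pyapi.utils, as its path suggests).

from r2pyapi.utils import u32_to_u8_array, hex_as_string, hex_as_sring_prefix

word = 0x11223344
parts = u32_to_u8_array(word)       # little-endian byte order: [0x44, 0x33, 0x22, 0x11]
print(parts)                        # [68, 51, 34, 17]
print(hex_as_string(parts))         # '44332211'
print(hex_as_sring_prefix(parts))   # '\x44\x33\x22\x11'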
|
parser/fase2/team01/Grupo1/Expresiones/Primitivo.py | webdev188/tytus | 35 | 6630444 | import sys
sys.path.append('../Grupo1/Instrucciones')
from instruccion import *
class Primitive(Instruccion):
def __init__(self, type, val):
self.type = type
self.val = val
def execute(self):
return self
def __repr__(self):
return str(self.__dict__)
| import sys
sys.path.append('../Grupo1/Instrucciones')
from instruccion import *
class Primitive(Instruccion):
def __init__(self, type, val):
self.type = type
self.val = val
def execute(self):
return self
def __repr__(self):
return str(self.__dict__)
| none | 1 | 2.429644 | 2 |
|
python/django/django-model-try/proj/settings_local.py | 10sr/junks | 0 | 6630445 | # -*- mode: python -*-
"""
Django settings for neru project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from typing import Dict, List
from proj._settings_common import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS: List[str] = []
# Application definition
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS: List[Dict[str, str]] = []
| # -*- mode: python -*-
"""
Django settings for neru project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from typing import Dict, List
from proj._settings_common import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS: List[str] = []
# Application definition
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS: List[Dict[str, str]] = []
| en | 0.684805 | # -*- mode: python -*- Django settings for neru project. Generated by 'django-admin startproject' using Django 1.10.6. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators | 1.73063 | 2 |
bubo/migrations/007.py | jaywink/name-still-wip | 12 | 6630446 | <reponame>jaywink/name-still-wip
def forward(cursor):
# Remove column 'power_to_write' from rooms
cursor.execute("""
CREATE TABLE rooms_backup (
id INTEGER PRIMARY KEY,
name text,
alias text,
room_id text null,
title text default '',
icon text default '',
encrypted integer,
public integer,
type text default ''
)
""")
cursor.execute("""
INSERT INTO rooms_backup SELECT id, name, alias, room_id, title, icon, encrypted, public, type
FROM rooms
""")
cursor.execute("""
DROP TABLE rooms
""")
cursor.execute("""
CREATE TABLE rooms (
id INTEGER PRIMARY KEY autoincrement,
name text,
alias text constraint room_alias_unique_idx unique,
room_id text null constraint room_room_id_unique_idx unique,
title text default '',
icon text default '',
encrypted integer,
public integer,
type text default ''
)
""")
cursor.execute("""
INSERT INTO rooms SELECT id, name, alias, room_id, title, icon, encrypted, public, type
FROM rooms_backup
""")
cursor.execute("""
DROP TABLE rooms_backup
""")
| def forward(cursor):
# Remove column 'power_to_write' from rooms
cursor.execute("""
CREATE TABLE rooms_backup (
id INTEGER PRIMARY KEY,
name text,
alias text,
room_id text null,
title text default '',
icon text default '',
encrypted integer,
public integer,
type text default ''
)
""")
cursor.execute("""
INSERT INTO rooms_backup SELECT id, name, alias, room_id, title, icon, encrypted, public, type
FROM rooms
""")
cursor.execute("""
DROP TABLE rooms
""")
cursor.execute("""
CREATE TABLE rooms (
id INTEGER PRIMARY KEY autoincrement,
name text,
alias text constraint room_alias_unique_idx unique,
room_id text null constraint room_room_id_unique_idx unique,
title text default '',
icon text default '',
encrypted integer,
public integer,
type text default ''
)
""")
cursor.execute("""
INSERT INTO rooms SELECT id, name, alias, room_id, title, icon, encrypted, public, type
FROM rooms_backup
""")
cursor.execute("""
DROP TABLE rooms_backup
""") | en | 0.371268 | # Remove column 'power_to_write' from rooms CREATE TABLE rooms_backup ( id INTEGER PRIMARY KEY, name text, alias text, room_id text null, title text default '', icon text default '', encrypted integer, public integer, type text default '' ) INSERT INTO rooms_backup SELECT id, name, alias, room_id, title, icon, encrypted, public, type FROM rooms DROP TABLE rooms CREATE TABLE rooms ( id INTEGER PRIMARY KEY autoincrement, name text, alias text constraint room_alias_unique_idx unique, room_id text null constraint room_room_id_unique_idx unique, title text default '', icon text default '', encrypted integer, public integer, type text default '' ) INSERT INTO rooms SELECT id, name, alias, room_id, title, icon, encrypted, public, type FROM rooms_backup DROP TABLE rooms_backup | 3.380793 | 3 |
chatterbot/adapters/input/gitter.py | lucasqiu/ChatterBot | 2 | 6630447 | <filename>chatterbot/adapters/input/gitter.py
from chatterbot.adapters.input import InputAdapter
from chatterbot.conversation import Statement
from time import sleep
import requests
class Gitter(InputAdapter):
"""
An input adapter that allows a ChatterBot instance to get
input statements from a Gitter room.
"""
def __init__(self, **kwargs):
super(Gitter, self).__init__(**kwargs)
self.gitter_host = kwargs.get('gitter_host', 'https://api.gitter.im/v1/')
self.gitter_room = kwargs.get('gitter_room')
self.gitter_api_token = kwargs.get('gitter_api_token')
self.only_respond_to_mentions = kwargs.get('gitter_only_respond_to_mentions', True)
self.sleep_time = kwargs.get('gitter_sleep_time', 4)
authorization_header = 'Bearer {}'.format(self.gitter_api_token)
self.headers = {
'Authorization': authorization_header,
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# Join the Gitter room
room_data = self.join_room(self.gitter_room)
self.room_id = room_data.get('id')
user_data = self.get_user_data()
self.user_id = user_data[0].get('id')
self.username = user_data[0].get('username')
def _validate_status_code(self, response):
code = response.status_code
if code not in [200, 201]:
            raise self.HTTPStatusException('{} status code received'.format(code))
def join_room(self, room_name):
endpoint = '{}rooms'.format(self.gitter_host)
response = requests.post(
endpoint,
headers=self.headers,
json={'uri': room_name}
)
self.logger.info(u'{} joining room {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
return response.json()
def get_user_data(self):
endpoint = '{}user'.format(self.gitter_host)
response = requests.get(
endpoint,
headers=self.headers
)
self.logger.info(u'{} retrieving user data {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
return response.json()
def mark_messages_as_read(self, message_ids):
endpoint = '{}user/{}/rooms/{}/unreadItems'.format(self.gitter_host, self.user_id, self.room_id)
response = requests.post(
endpoint,
headers=self.headers,
json={'chat': message_ids}
)
self.logger.info(u'{} marking messages as read {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
return response.json()
def get_most_recent_message(self):
endpoint = '{}rooms/{}/chatMessages?limit=1'.format(self.gitter_host, self.room_id)
response = requests.get(
endpoint,
headers=self.headers
)
self.logger.info(u'{} getting most recent message'.format(
response.status_code
))
self._validate_status_code(response)
data = response.json()
if data:
return data[0]
return None
def _contains_mention(self, mentions):
for mention in mentions:
if self.username == mention.get('screenName'):
return True
return False
def should_respond(self, data):
"""
Takes the API response data from a single message.
Returns true if the chat bot should respond.
"""
if data and self.only_respond_to_mentions:
if data['unread'] == True and self._contains_mention(data['mentions']):
return True
else:
return False
elif data and data['unread'] == True:
return True
return False
def remove_mentions(self, text):
"""
Return a string that has no leading mentions.
"""
import re
from chatterbot.utils.clean import clean_whitespace
text_without_mentions = re.sub(r'@\S+', '', text)
return clean_whitespace(text_without_mentions)
def process_input(self, statement):
new_message = False
while not new_message:
data = self.get_most_recent_message()
if self.should_respond(data):
self.mark_messages_as_read([data['id']])
new_message = True
self.logger.info(u'')
sleep(self.sleep_time)
text = self.remove_mentions(data['text'])
statement = Statement(text)
return statement
class HTTPStatusException(Exception):
"""
Exception raised when unexpected non-success HTTP
status codes are returned in a response.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| <filename>chatterbot/adapters/input/gitter.py
from chatterbot.adapters.input import InputAdapter
from chatterbot.conversation import Statement
from time import sleep
import requests
class Gitter(InputAdapter):
"""
An input adapter that allows a ChatterBot instance to get
input statements from a Gitter room.
"""
def __init__(self, **kwargs):
super(Gitter, self).__init__(**kwargs)
self.gitter_host = kwargs.get('gitter_host', 'https://api.gitter.im/v1/')
self.gitter_room = kwargs.get('gitter_room')
self.gitter_api_token = kwargs.get('gitter_api_token')
self.only_respond_to_mentions = kwargs.get('gitter_only_respond_to_mentions', True)
self.sleep_time = kwargs.get('gitter_sleep_time', 4)
authorization_header = 'Bearer {}'.format(self.gitter_api_token)
self.headers = {
'Authorization': authorization_header,
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# Join the Gitter room
room_data = self.join_room(self.gitter_room)
self.room_id = room_data.get('id')
user_data = self.get_user_data()
self.user_id = user_data[0].get('id')
self.username = user_data[0].get('username')
def _validate_status_code(self, response):
code = response.status_code
if code not in [200, 201]:
            raise self.HTTPStatusException('{} status code received'.format(code))
def join_room(self, room_name):
endpoint = '{}rooms'.format(self.gitter_host)
response = requests.post(
endpoint,
headers=self.headers,
json={'uri': room_name}
)
self.logger.info(u'{} joining room {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
return response.json()
def get_user_data(self):
endpoint = '{}user'.format(self.gitter_host)
response = requests.get(
endpoint,
headers=self.headers
)
self.logger.info(u'{} retrieving user data {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
return response.json()
def mark_messages_as_read(self, message_ids):
endpoint = '{}user/{}/rooms/{}/unreadItems'.format(self.gitter_host, self.user_id, self.room_id)
response = requests.post(
endpoint,
headers=self.headers,
json={'chat': message_ids}
)
self.logger.info(u'{} marking messages as read {}'.format(
response.status_code, endpoint
))
self._validate_status_code(response)
return response.json()
def get_most_recent_message(self):
endpoint = '{}rooms/{}/chatMessages?limit=1'.format(self.gitter_host, self.room_id)
response = requests.get(
endpoint,
headers=self.headers
)
self.logger.info(u'{} getting most recent message'.format(
response.status_code
))
self._validate_status_code(response)
data = response.json()
if data:
return data[0]
return None
def _contains_mention(self, mentions):
for mention in mentions:
if self.username == mention.get('screenName'):
return True
return False
def should_respond(self, data):
"""
Takes the API response data from a single message.
Returns true if the chat bot should respond.
"""
if data and self.only_respond_to_mentions:
if data['unread'] == True and self._contains_mention(data['mentions']):
return True
else:
return False
elif data and data['unread'] == True:
return True
return False
def remove_mentions(self, text):
"""
Return a string that has no leading mentions.
"""
import re
from chatterbot.utils.clean import clean_whitespace
text_without_mentions = re.sub(r'@\S+', '', text)
return clean_whitespace(text_without_mentions)
def process_input(self, statement):
new_message = False
while not new_message:
data = self.get_most_recent_message()
if self.should_respond(data):
self.mark_messages_as_read([data['id']])
new_message = True
self.logger.info(u'')
sleep(self.sleep_time)
text = self.remove_mentions(data['text'])
statement = Statement(text)
return statement
class HTTPStatusException(Exception):
"""
Exception raised when unexpected non-success HTTP
status codes are returned in a response.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| en | 0.765247 | An input adapter that allows a ChatterBot instance to get input statements from a Gitter room. # Join the Gitter room Takes the API response data from a single message. Returns true if the chat bot should respond. Return a string that has no leading mentions. Exception raised when unexpected non-success HTTP status codes are returned in a response. | 2.950327 | 3 |
aries_cloudagent/revocation/manager.py | zanost/aries-cloudagent-python | 1 | 6630448 | <reponame>zanost/aries-cloudagent-python
"""Classes to manage credential revocation."""
import json
import logging
from typing import Mapping, Sequence, Text
from ..core.error import BaseError
from ..core.profile import Profile
from ..indy.issuer import IndyIssuer
from ..storage.error import StorageNotFoundError
from .indy import IndyRevocation
from .models.issuer_cred_rev_record import IssuerCredRevRecord
from .models.issuer_rev_reg_record import IssuerRevRegRecord
class RevocationManagerError(BaseError):
"""Revocation manager error."""
class RevocationManager:
"""Class for managing revocation operations."""
def __init__(self, profile: Profile):
"""
Initialize a RevocationManager.
Args:
context: The context for this revocation manager
"""
self._profile = profile
self._logger = logging.getLogger(__name__)
async def revoke_credential_by_cred_ex_id(
self, cred_ex_id: str, publish: bool = False
):
"""
Revoke a credential by its credential exchange identifier at issue.
Optionally, publish the corresponding revocation registry delta to the ledger.
Args:
cred_ex_id: credential exchange identifier
publish: whether to publish the resulting revocation registry delta,
along with any revocations pending against it
"""
try:
async with self._profile.session() as session:
rec = await IssuerCredRevRecord.retrieve_by_cred_ex_id(
session,
cred_ex_id,
)
except StorageNotFoundError as err:
raise RevocationManagerError(
"No issuer credential revocation record found for "
f"credential exchange id {cred_ex_id}"
) from err
return await self.revoke_credential(
rev_reg_id=rec.rev_reg_id, cred_rev_id=rec.cred_rev_id, publish=publish
)
async def revoke_credential(
self,
rev_reg_id: str,
cred_rev_id: str,
publish: bool = False,
):
"""
Revoke a credential.
Optionally, publish the corresponding revocation registry delta to the ledger.
Args:
rev_reg_id: revocation registry id
cred_rev_id: credential revocation id
publish: whether to publish the resulting revocation registry delta,
along with any revocations pending against it
"""
issuer = self._profile.inject(IndyIssuer)
revoc = IndyRevocation(self._profile)
issuer_rr_rec = await revoc.get_issuer_rev_reg_record(rev_reg_id)
if not issuer_rr_rec:
raise RevocationManagerError(
f"No revocation registry record found for id {rev_reg_id}"
)
if publish:
rev_reg = await revoc.get_ledger_registry(rev_reg_id)
await rev_reg.get_or_fetch_local_tails_path()
# pick up pending revocations on input revocation registry
crids = list(set(issuer_rr_rec.pending_pub + [cred_rev_id]))
(delta_json, _) = await issuer.revoke_credentials(
issuer_rr_rec.revoc_reg_id, issuer_rr_rec.tails_local_path, crids
)
if delta_json:
issuer_rr_rec.revoc_reg_entry = json.loads(delta_json)
await issuer_rr_rec.send_entry(self._profile)
async with self._profile.session() as session:
await issuer_rr_rec.clear_pending(session)
else:
async with self._profile.session() as session:
await issuer_rr_rec.mark_pending(session, cred_rev_id)
async def publish_pending_revocations(
self,
rrid2crid: Mapping[Text, Sequence[Text]] = None,
write_ledger: bool = True,
endorser_did: str = None,
) -> Mapping[Text, Sequence[Text]]:
"""
Publish pending revocations to the ledger.
Args:
rrid2crid: Mapping from revocation registry identifiers to all credential
revocation identifiers within each to publish. Specify null/empty map
for all revocation registries. Specify empty sequence per revocation
registry identifier for all pending within the revocation registry;
e.g.,
{} - publish all pending revocations from all revocation registries
{
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0": [],
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1": ["1", "2"]
} - publish:
- all pending revocations from all revocation registry tagged 0
- pending ["1", "2"] from revocation registry tagged 1
- no pending revocations from any other revocation registries.
            write_ledger: whether to write the transaction to the ledger, or prepare a
transaction to be endorsed
endorser_did: the did of the endorser, if endorsing the transaction
Returns: mapping from each revocation registry id to its cred rev ids published.
"""
result = {}
issuer = self._profile.inject(IndyIssuer)
txn = await self._profile.transaction()
issuer_rr_recs = await IssuerRevRegRecord.query_by_pending(txn)
for issuer_rr_rec in issuer_rr_recs:
rrid = issuer_rr_rec.revoc_reg_id
crids = []
if not rrid2crid:
crids = issuer_rr_rec.pending_pub
elif rrid in rrid2crid:
crids = [
crid
for crid in issuer_rr_rec.pending_pub
if crid in (rrid2crid[rrid] or []) or not rrid2crid[rrid]
]
if crids:
# FIXME - must use the same transaction
(delta_json, failed_crids) = await issuer.revoke_credentials(
issuer_rr_rec.revoc_reg_id,
issuer_rr_rec.tails_local_path,
crids,
)
issuer_rr_rec.revoc_reg_entry = json.loads(delta_json)
send_entry_result = await issuer_rr_rec.send_entry(
self._profile, write_ledger=write_ledger, endorser_did=endorser_did
)
if endorser_did and not write_ledger:
return send_entry_result
published = [crid for crid in crids if crid not in failed_crids]
result[issuer_rr_rec.revoc_reg_id] = published
await issuer_rr_rec.clear_pending(txn, published)
await txn.commit()
return result
async def clear_pending_revocations(
self, purge: Mapping[Text, Sequence[Text]] = None
) -> Mapping[Text, Sequence[Text]]:
"""
Clear pending revocation publications.
Args:
purge: Mapping from revocation registry identifiers to all credential
revocation identifiers within each to clear. Specify null/empty map
for all revocation registries. Specify empty sequence per revocation
registry identifier for all pending within the revocation registry;
e.g.,
{} - clear all pending revocations from all revocation registries
{
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0": [],
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1": ["1", "2"]
} - clear
- all pending revocations from all revocation registry tagged 0
- pending ["1", "2"] from revocation registry tagged 1
- no pending revocations from any other revocation registries.
Returns:
mapping from revocation registry id to its remaining
cred rev ids still marked pending, omitting revocation registries
with no remaining pending publications.
"""
result = {}
async with self._profile.transaction() as txn:
issuer_rr_recs = await IssuerRevRegRecord.query_by_pending(txn)
for issuer_rr_rec in issuer_rr_recs:
rrid = issuer_rr_rec.revoc_reg_id
await issuer_rr_rec.clear_pending(txn, (purge or {}).get(rrid))
if issuer_rr_rec.pending_pub:
result[rrid] = issuer_rr_rec.pending_pub
await txn.commit()
return result
| """Classes to manage credential revocation."""
import json
import logging
from typing import Mapping, Sequence, Text
from ..core.error import BaseError
from ..core.profile import Profile
from ..indy.issuer import IndyIssuer
from ..storage.error import StorageNotFoundError
from .indy import IndyRevocation
from .models.issuer_cred_rev_record import IssuerCredRevRecord
from .models.issuer_rev_reg_record import IssuerRevRegRecord
class RevocationManagerError(BaseError):
"""Revocation manager error."""
class RevocationManager:
"""Class for managing revocation operations."""
def __init__(self, profile: Profile):
"""
Initialize a RevocationManager.
Args:
context: The context for this revocation manager
"""
self._profile = profile
self._logger = logging.getLogger(__name__)
async def revoke_credential_by_cred_ex_id(
self, cred_ex_id: str, publish: bool = False
):
"""
Revoke a credential by its credential exchange identifier at issue.
Optionally, publish the corresponding revocation registry delta to the ledger.
Args:
cred_ex_id: credential exchange identifier
publish: whether to publish the resulting revocation registry delta,
along with any revocations pending against it
"""
try:
async with self._profile.session() as session:
rec = await IssuerCredRevRecord.retrieve_by_cred_ex_id(
session,
cred_ex_id,
)
except StorageNotFoundError as err:
raise RevocationManagerError(
"No issuer credential revocation record found for "
f"credential exchange id {cred_ex_id}"
) from err
return await self.revoke_credential(
rev_reg_id=rec.rev_reg_id, cred_rev_id=rec.cred_rev_id, publish=publish
)
async def revoke_credential(
self,
rev_reg_id: str,
cred_rev_id: str,
publish: bool = False,
):
"""
Revoke a credential.
Optionally, publish the corresponding revocation registry delta to the ledger.
Args:
rev_reg_id: revocation registry id
cred_rev_id: credential revocation id
publish: whether to publish the resulting revocation registry delta,
along with any revocations pending against it
"""
issuer = self._profile.inject(IndyIssuer)
revoc = IndyRevocation(self._profile)
issuer_rr_rec = await revoc.get_issuer_rev_reg_record(rev_reg_id)
if not issuer_rr_rec:
raise RevocationManagerError(
f"No revocation registry record found for id {rev_reg_id}"
)
if publish:
rev_reg = await revoc.get_ledger_registry(rev_reg_id)
await rev_reg.get_or_fetch_local_tails_path()
# pick up pending revocations on input revocation registry
crids = list(set(issuer_rr_rec.pending_pub + [cred_rev_id]))
(delta_json, _) = await issuer.revoke_credentials(
issuer_rr_rec.revoc_reg_id, issuer_rr_rec.tails_local_path, crids
)
if delta_json:
issuer_rr_rec.revoc_reg_entry = json.loads(delta_json)
await issuer_rr_rec.send_entry(self._profile)
async with self._profile.session() as session:
await issuer_rr_rec.clear_pending(session)
else:
async with self._profile.session() as session:
await issuer_rr_rec.mark_pending(session, cred_rev_id)
async def publish_pending_revocations(
self,
rrid2crid: Mapping[Text, Sequence[Text]] = None,
write_ledger: bool = True,
endorser_did: str = None,
) -> Mapping[Text, Sequence[Text]]:
"""
Publish pending revocations to the ledger.
Args:
rrid2crid: Mapping from revocation registry identifiers to all credential
revocation identifiers within each to publish. Specify null/empty map
for all revocation registries. Specify empty sequence per revocation
registry identifier for all pending within the revocation registry;
e.g.,
{} - publish all pending revocations from all revocation registries
{
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0": [],
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1": ["1", "2"]
} - publish:
- all pending revocations from all revocation registry tagged 0
- pending ["1", "2"] from revocation registry tagged 1
- no pending revocations from any other revocation registries.
            write_ledger: whether to write the transaction to the ledger, or prepare a
transaction to be endorsed
endorser_did: the did of the endorser, if endorsing the transaction
Returns: mapping from each revocation registry id to its cred rev ids published.
"""
result = {}
issuer = self._profile.inject(IndyIssuer)
txn = await self._profile.transaction()
issuer_rr_recs = await IssuerRevRegRecord.query_by_pending(txn)
for issuer_rr_rec in issuer_rr_recs:
rrid = issuer_rr_rec.revoc_reg_id
crids = []
if not rrid2crid:
crids = issuer_rr_rec.pending_pub
elif rrid in rrid2crid:
crids = [
crid
for crid in issuer_rr_rec.pending_pub
if crid in (rrid2crid[rrid] or []) or not rrid2crid[rrid]
]
if crids:
# FIXME - must use the same transaction
(delta_json, failed_crids) = await issuer.revoke_credentials(
issuer_rr_rec.revoc_reg_id,
issuer_rr_rec.tails_local_path,
crids,
)
issuer_rr_rec.revoc_reg_entry = json.loads(delta_json)
send_entry_result = await issuer_rr_rec.send_entry(
self._profile, write_ledger=write_ledger, endorser_did=endorser_did
)
if endorser_did and not write_ledger:
return send_entry_result
published = [crid for crid in crids if crid not in failed_crids]
result[issuer_rr_rec.revoc_reg_id] = published
await issuer_rr_rec.clear_pending(txn, published)
await txn.commit()
return result
async def clear_pending_revocations(
self, purge: Mapping[Text, Sequence[Text]] = None
) -> Mapping[Text, Sequence[Text]]:
"""
Clear pending revocation publications.
Args:
purge: Mapping from revocation registry identifiers to all credential
revocation identifiers within each to clear. Specify null/empty map
for all revocation registries. Specify empty sequence per revocation
registry identifier for all pending within the revocation registry;
e.g.,
{} - clear all pending revocations from all revocation registries
{
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0": [],
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1": ["1", "2"]
} - clear
- all pending revocations from all revocation registry tagged 0
- pending ["1", "2"] from revocation registry tagged 1
- no pending revocations from any other revocation registries.
Returns:
mapping from revocation registry id to its remaining
cred rev ids still marked pending, omitting revocation registries
with no remaining pending publications.
"""
result = {}
async with self._profile.transaction() as txn:
issuer_rr_recs = await IssuerRevRegRecord.query_by_pending(txn)
for issuer_rr_rec in issuer_rr_recs:
rrid = issuer_rr_rec.revoc_reg_id
await issuer_rr_rec.clear_pending(txn, (purge or {}).get(rrid))
if issuer_rr_rec.pending_pub:
result[rrid] = issuer_rr_rec.pending_pub
await txn.commit()
return result | en | 0.804239 | Classes to manage credential revocation. Revocation manager error. Class for managing revocation operations. Initialize a RevocationManager. Args: context: The context for this revocation manager Revoke a credential by its credential exchange identifier at issue. Optionally, publish the corresponding revocation registry delta to the ledger. Args: cred_ex_id: credential exchange identifier publish: whether to publish the resulting revocation registry delta, along with any revocations pending against it Revoke a credential. Optionally, publish the corresponding revocation registry delta to the ledger. Args: rev_reg_id: revocation registry id cred_rev_id: credential revocation id publish: whether to publish the resulting revocation registry delta, along with any revocations pending against it # pick up pending revocations on input revocation registry Publish pending revocations to the ledger. Args: rrid2crid: Mapping from revocation registry identifiers to all credential revocation identifiers within each to publish. Specify null/empty map for all revocation registries. Specify empty sequence per revocation registry identifier for all pending within the revocation registry; e.g., {} - publish all pending revocations from all revocation registries { "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0": [], "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1": ["1", "2"] } - publish: - all pending revocations from all revocation registry tagged 0 - pending ["1", "2"] from revocation registry tagged 1 - no pending revocations from any other revocation registries. write_ledger: wether to write the transaction to the ledger, or prepare a transaction to be endorsed endorser_did: the did of the endorser, if endorsing the transaction Returns: mapping from each revocation registry id to its cred rev ids published. # FIXME - must use the same transaction Clear pending revocation publications. Args: purge: Mapping from revocation registry identifiers to all credential revocation identifiers within each to clear. Specify null/empty map for all revocation registries. Specify empty sequence per revocation registry identifier for all pending within the revocation registry; e.g., {} - clear all pending revocations from all revocation registries { "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0": [], "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1": ["1", "2"] } - clear - all pending revocations from all revocation registry tagged 0 - pending ["1", "2"] from revocation registry tagged 1 - no pending revocations from any other revocation registries. Returns: mapping from revocation registry id to its remaining cred rev ids still marked pending, omitting revocation registries with no remaining pending publications. | 2.308015 | 2 |
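A hedged driver for the manager above: `profile` must be a fully configured ACA-Py Profile with an IndyIssuer bound, and the revocation registry id and cred rev ids below are placeholders.

import asyncio

async def publish_example(profile):
    mgr = RevocationManager(profile)
    rrid2crid = {
        # placeholder registry id -> cred rev ids to publish
        "WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:20:tag:CL_ACCUM:0": ["1", "2"],
    }
    published = await mgr.publish_pending_revocations(rrid2crid)
    print(published)   # {rev_reg_id: [cred_rev_ids actually published], ...}

# asyncio.run(publish_example(profile))   # needs a real Profile instance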
aiventure/common/adventure.py | dyeo/aiventure | 0 | 6630449 | from typing import *
class Adventure(object):
def __init__(
self,
name: str = None,
context: str = None,
):
self.name: str = name
self.context: str = context
self.memory: str = ''
self.actions: List[str] = []
self.results: List[str] = []
def to_dict(self) -> dict:
return {
'name': self.name,
'context': self.context,
'memory': self.memory,
'actions': self.actions,
'results': self.results
}
def from_dict(self, d: Dict[str, Any]):
self.name = d['name']
self.context = d['context']
self.memory = d['memory']
self.actions = d['actions']
self.results = d['results']
@property
def story(self) -> list:
"""
The user actions and AI results in chronological order, not including the story context.
:return: A list of action and result strings, interspersed, starting with the first action.
"""
return [s for p in zip(self.actions, self.results) for s in p]
@property
def full_story(self) -> list:
"""
The user actions and AI results in chronological order, including the story context.
:return: The story context string, followed by a list of action and result strings, interspersed, starting
with the first action.
"""
return ([self.context] if self.context else []) + self.story
def get_ai_story(self, start: Optional[int] = None, end: Optional[int] = None) -> list:
"""
Retrieves a clipped portion of the adventure, including the story's memory, for purposes of AI generation.
:param start: Where to start remembering the story from.
:param end: Where the "end" of the story is.
:return: The story context string, followed by a list of the last `self.memory` action and result strings,
interspersed.
"""
start = 0 if start is None else start
end = len(self.story) if end is None else end
result = [self.context] if self.context else []
result += [self.memory]
result += self.story[start:end]
return result
| from typing import *
class Adventure(object):
def __init__(
self,
name: str = None,
context: str = None,
):
self.name: str = name
self.context: str = context
self.memory: str = ''
self.actions: List[str] = []
self.results: List[str] = []
def to_dict(self) -> dict:
return {
'name': self.name,
'context': self.context,
'memory': self.memory,
'actions': self.actions,
'results': self.results
}
def from_dict(self, d: Dict[str, Any]):
self.name = d['name']
self.context = d['context']
self.memory = d['memory']
self.actions = d['actions']
self.results = d['results']
@property
def story(self) -> list:
"""
The user actions and AI results in chronological order, not including the story context.
:return: A list of action and result strings, interspersed, starting with the first action.
"""
return [s for p in zip(self.actions, self.results) for s in p]
@property
def full_story(self) -> list:
"""
The user actions and AI results in chronological order, including the story context.
:return: The story context string, followed by a list of action and result strings, interspersed, starting
with the first action.
"""
return ([self.context] if self.context else []) + self.story
def get_ai_story(self, start: Optional[int] = None, end: Optional[int] = None) -> list:
"""
Retrieves a clipped portion of the adventure, including the story's memory, for purposes of AI generation.
:param start: Where to start remembering the story from.
:param end: Where the "end" of the story is.
:return: The story context string, followed by a list of the last `self.memory` action and result strings,
interspersed.
"""
start = 0 if start is None else start
end = len(self.story) if end is None else end
result = [self.context] if self.context else []
result += [self.memory]
result += self.story[start:end]
return result
| en | 0.856479 | The user actions and AI results in chronological order, not including the story context. :return: A list of action and result strings, interspersed, starting with the first action. The user actions and AI results in chronological order, including the story context. :return: The story context string, followed by a list of action and result strings, interspersed, starting with the first action. Retrieves a clipped portion of the adventure, including the story's memory, for purposes of AI generation. :param start: Where to start remembering the story from. :param end: Where the "end" of the story is. :return: The story context string, followed by a list of the last `self.memory` action and result strings, interspersed. | 3.761923 | 4 |
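A quick usage sketch of the Adventure container above; the strings are invented.

adv = Adventure(name='demo', context='You wake up in a quiet village.')
adv.memory = 'You are carrying a lantern.'
adv.actions += ['> look around', '> approach the well']
adv.results += ['The square is empty except for a well.', 'A rope descends into darkness.']

print(adv.story)                  # actions and results interleaved
print(adv.get_ai_story(start=0))  # context + memory + clipped story, for prompting the model
print(adv.to_dict())              # round-trips via from_dict()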
users/admin.py | KgalabiSimon/django_shopping_cart | 0 | 6630450 | <reponame>KgalabiSimon/django_shopping_cart
from django.contrib import admin
from .models import Customer
# Register your models here.
admin.site.register(Customer) | from django.contrib import admin
from .models import Customer
# Register your models here.
admin.site.register(Customer) | en | 0.968259 | # Register your models here. | 1.39431 | 1 |
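An alternative to the bare register call above, using a ModelAdmin for list display; the field names are assumptions, since the Customer model itself is not shown here.

from django.contrib import admin
from .models import Customer

@admin.register(Customer)                 # replaces admin.site.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    list_display = ('id', 'user')         # assumed fields
    search_fields = ('user__username',)   # assumed relation to the auth user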
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.