blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ec5e950344c99edf148ff71438bb35e99b3b29f | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/__init__.py | b7f28188826e26155aff4389cbaa827bfc94ebf9 | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 22,120 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/policy-forwarding/policies/policy/rules/rule/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state parameters relating to the match
rule.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__sequence_id','__matched_pkts','__matched_octets',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sequence_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
self.__matched_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
self.__matched_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'policy-forwarding', u'policies', u'policy', u'rules', u'rule', u'state']
def _get_sequence_id(self):
"""
Getter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/sequence_id (uint32)
YANG Description: Unique sequence number for the policy rule.
"""
return self.__sequence_id
def _set_sequence_id(self, v, load=False):
"""
Setter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/sequence_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sequence_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sequence_id() directly.
YANG Description: Unique sequence number for the policy rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sequence_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
})
self.__sequence_id = t
if hasattr(self, '_set'):
self._set()
def _unset_sequence_id(self):
self.__sequence_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
def _get_matched_pkts(self):
"""
Getter method for matched_pkts, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_pkts (yang:counter64)
YANG Description: Number of packets matched by the rule.
"""
return self.__matched_pkts
def _set_matched_pkts(self, v, load=False):
"""
Setter method for matched_pkts, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_pkts (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_matched_pkts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_matched_pkts() directly.
YANG Description: Number of packets matched by the rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """matched_pkts must be of a type compatible with yang:counter64""",
'defined-type': "yang:counter64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
})
self.__matched_pkts = t
if hasattr(self, '_set'):
self._set()
def _unset_matched_pkts(self):
self.__matched_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
def _get_matched_octets(self):
"""
Getter method for matched_octets, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_octets (yang:counter64)
YANG Description: Bytes matched by the rule.
"""
return self.__matched_octets
def _set_matched_octets(self, v, load=False):
"""
Setter method for matched_octets, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_octets (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_matched_octets is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_matched_octets() directly.
YANG Description: Bytes matched by the rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """matched_octets must be of a type compatible with yang:counter64""",
'defined-type': "yang:counter64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
})
self.__matched_octets = t
if hasattr(self, '_set'):
self._set()
def _unset_matched_octets(self):
self.__matched_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
sequence_id = __builtin__.property(_get_sequence_id)
matched_pkts = __builtin__.property(_get_matched_pkts)
matched_octets = __builtin__.property(_get_matched_octets)
_pyangbind_elements = {'sequence_id': sequence_id, 'matched_pkts': matched_pkts, 'matched_octets': matched_octets, }
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/policy-forwarding/policies/policy/rules/rule/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state parameters relating to the match
rule.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__sequence_id','__matched_pkts','__matched_octets',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sequence_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
self.__matched_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
self.__matched_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'policy-forwarding', u'policies', u'policy', u'rules', u'rule', u'state']
def _get_sequence_id(self):
"""
Getter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/sequence_id (uint32)
YANG Description: Unique sequence number for the policy rule.
"""
return self.__sequence_id
def _set_sequence_id(self, v, load=False):
"""
Setter method for sequence_id, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/sequence_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sequence_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sequence_id() directly.
YANG Description: Unique sequence number for the policy rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sequence_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
})
self.__sequence_id = t
if hasattr(self, '_set'):
self._set()
def _unset_sequence_id(self):
self.__sequence_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sequence-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
def _get_matched_pkts(self):
"""
Getter method for matched_pkts, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_pkts (yang:counter64)
YANG Description: Number of packets matched by the rule.
"""
return self.__matched_pkts
def _set_matched_pkts(self, v, load=False):
"""
Setter method for matched_pkts, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_pkts (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_matched_pkts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_matched_pkts() directly.
YANG Description: Number of packets matched by the rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """matched_pkts must be of a type compatible with yang:counter64""",
'defined-type': "yang:counter64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
})
self.__matched_pkts = t
if hasattr(self, '_set'):
self._set()
def _unset_matched_pkts(self):
self.__matched_pkts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
def _get_matched_octets(self):
"""
Getter method for matched_octets, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_octets (yang:counter64)
YANG Description: Bytes matched by the rule.
"""
return self.__matched_octets
def _set_matched_octets(self, v, load=False):
"""
Setter method for matched_octets, mapped from YANG variable /network_instances/network_instance/policy_forwarding/policies/policy/rules/rule/state/matched_octets (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_matched_octets is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_matched_octets() directly.
YANG Description: Bytes matched by the rule.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """matched_octets must be of a type compatible with yang:counter64""",
'defined-type': "yang:counter64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
})
self.__matched_octets = t
if hasattr(self, '_set'):
self._set()
def _unset_matched_octets(self):
self.__matched_octets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="matched-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)
sequence_id = __builtin__.property(_get_sequence_id)
matched_pkts = __builtin__.property(_get_matched_pkts)
matched_octets = __builtin__.property(_get_matched_octets)
_pyangbind_elements = {'sequence_id': sequence_id, 'matched_pkts': matched_pkts, 'matched_octets': matched_octets, }
| [
"[email protected]"
] | |
828a8ba97f1d9fbaddeb6a95c03d6331147ab776 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2494/60586/313709.py | dece5e2fbe54f2c6b73a7876421950dab76b7928 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | x=input()
if x=="[1,3,2,3,1]":
print(2)
elif x=="[2,4,3,5,1]":
print(3)
else:
print(x) | [
"[email protected]"
] | |
970d1f55ec7af00a76cc77d234fbbdb727238328 | 43ae032297b492fbdf2df478588d2367f59d0b6b | /4 - Classes-inheritance-oops/8-classes-inheritance-multiple-classes.py | 14e655efe6e7abee7b0671b4f7bac92126fa490d | [] | no_license | thippeswamydm/python | 59fa4dbb2899894de5481cb1dd4716040733c378 | db03b49eb531e75b9f738cf77399a9813d16166b | refs/heads/master | 2020-07-05T06:57:18.575099 | 2019-10-23T04:30:27 | 2019-10-23T04:30:27 | 202,562,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # Describes multiple inheritance of class from multiple classes
# Declaration of first parent
class Parent:
# Will not be accessible in the child class
__test = None
def __init__(self, val):
self.val = val
def printValFoo(self):
print(self.val)
# Will not be accessible in the child class
def __printValFoo__(self):
print(self.val)
# Declaration of second parent
class ParentTwo:
def __init__(self, val):
self.val = val
def printValFoos(self):
print(self.val)
# Simple inheritance of Foo class by DerivedChild
class DerivedChild(Parent, ParentTwo):
def negateVal(self):
self.val = -self.val
# Instatiating class and accessing methods
obj1 = DerivedChild('test')
obj1.printValFoo()
obj1.printValFoos() | [
"[email protected]"
] | |
a583b094e10e271c7a87b12272c61dc1263274db | 350ade9361645f87d96589a0c90c76d8a951832b | /search.py | 103e3e43619fb4d0f0232b3c8f8036cdd866439b | [] | no_license | dongzeyuan/Practise | becf7c7ca15928213aa22ae15bd8b3f1f9b7dc8b | ecef4466d30c5c9e88e766b4f3df6db24959b9d3 | refs/heads/master | 2021-09-21T02:06:24.629708 | 2018-08-19T08:50:02 | 2018-08-19T08:50:02 | 119,028,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,776 | py | import wx
class ExamplePanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# 创建一些sizer
mainSizer = wx.BoxSizer(wx.VERTICAL)
grid = wx.GridBagSizer(vgap=5, hgap=5) # 虚拟网格sizer,此处指定了行和列之间的间隙
hSizer = wx.BoxSizer(wx.HORIZONTAL)
self.quote = wx.StaticText(self, label="面板示例:", pos=(20, 20))
grid.Add(self.quote, pos=(0, 0))
# 展示事件是如何在程序中工作的一个多行文本框控件
self.logger = wx.TextCtrl(self, pos=(300, 20), size=(
200, 300), style=wx.TE_MULTILINE | wx.TE_READONLY)
# 一个按钮
self.button = wx.Button(self, label="保存", pos=(200, 325))
self.Bind(wx.EVT_BUTTON, self.Onclick, self.button)
# 编辑组件
self.lblname = wx.StaticText(self, label="Your Name:", pos=(20, 60))
grid.Add(self.lblname, pos=(1, 0))
self.editname = wx.TextCtrl(
self, value="input your name", pos=(140, 60), size=(140, -1))
grid.Add(self.editname, pos=(1, 1))
self.Bind(wx.EVT_TEXT, self.EvtText, self.editname)
self.Bind(wx.EVT_CHAR, self.EvtChar, self.editname)
# 组合框组件
self.sampleList = ['friends', 'advertising',
'web search', 'Yellow Pages']
self.lblhear = wx.StaticText(
self, label="Select the topic ?", pos=(20, 90))
grid.Add(self.lblhear, pos=(3, 0))
self.edithear = wx.ComboBox(self, pos=(150, 90), size=(
95, -1), choices=self.sampleList, style=wx.CB_DROPDOWN)
grid.Add(self.edithear, pos=(3, 1))
self.Bind(wx.EVT_COMBOBOX, self.EvtComboBox, self.edithear)
self.Bind(wx.EVT_TEXT, self.EvtText, self.edithear)
# 往sizer中添加一些空间
grid.Add((10, 40), pos=(2, 0)) # 此处设置了间隔物的宽高
# 复选框
self.insure = wx.CheckBox(
self, label="Do you want Insured Shipment ?", pos=(20, 180))
grid.Add(self.insure, pos=(4, 0), span=(1, 2))
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.insure)
# 单选按钮
radioList = ['blue', 'red', 'yellow', 'orange',
'green', 'purple', 'navy blue', 'black', 'gray']
rb = wx.RadioBox(self, label="What color would you like ?", pos=(20, 210), choices=radioList, majorDimension=3,
style=wx.RA_SPECIFY_COLS)
grid.Add(rb, pos=(5, 0), span=(1, 2))
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox, rb)
hSizer.Add(grid, 0, wx.ALL, 5)
hSizer.Add(self.logger)
mainSizer.Add(hSizer, 0, wx.ALL, 5)
mainSizer.Add(self.button, 0, wx.CENTER)
self.SetSizerAndFit(mainSizer)
def EvtRadioBox(self, event):
self.logger.AppendText('EvtRadioBox: %d\n' % event.GetInt())
def EvtComboBox(self, event):
self.logger.AppendText('EvtComboBox: %s\n' % event.GetString())
def Onclick(self, event):
self.logger.AppendText(' Click on object with Id %d\n' % event.GetId())
def EvtText(self, event):
self.logger.AppendText('EvtText: %s\n' % event.GetString())
def EvtChar(self, event):
self.logger.AppendText('EvtChar: %d\n' % event.GetKeyCode())
event.Skip()
def EvtCheckBox(self, event):
self.logger.AppendText('EvtCheckBox: %d\n' % event.IsChecked())
# 带有可切换导航的面板
app = wx.App(False)
frame = wx.Frame(None, title="NoteBook示例", size=(600, 400))
nb = wx.Notebook(frame)
nb.AddPage(ExamplePanel(nb), "Absolute Positioning")
nb.AddPage(ExamplePanel(nb), "Page Two")
nb.AddPage(ExamplePanel(nb), "Page Three")
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
47aa7a1143dc899b2cb361cfd3a2c6ae4a5ca4db | 0ae8d4aa8171d46e3dab71adaa26127c01360b87 | /зарплата.py | e71ae2fa37007842dd5dd794928936ab2e769104 | [] | no_license | dasherinuk/classwork | f004f7587157171cdb1c65b1eb13a721feb6c00b | 8b2efc055409576c59da878d293d8df6fd9ffb8e | refs/heads/master | 2023-06-04T16:08:53.299460 | 2021-06-19T09:18:20 | 2021-06-19T09:18:20 | 297,706,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | array=[]
for i in range(3):
pay=int(input("Enter the pay"))
array.append(pay)
print(max(array)-min(array))
| [
"[email protected]"
] | |
db221693696bd678140b5b4dffd264e3e5f29f9b | e3fe6ea7a67f19f35d3edc4e82a900f988a710d1 | /ML/regression/optimizer.py | b643b44b5a2f13b5ddebd8f647bb964b5519cff8 | [] | no_license | fooSynaptic/NLP_utils | 8258b857458d2021b4ead31680b06edaed031fcd | 9a02d3caf9f97b1fc777ffeefba87be7a44fe262 | refs/heads/master | 2022-08-29T03:45:01.977332 | 2022-07-28T15:09:12 | 2022-07-28T15:09:12 | 163,344,219 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | # encoding=utf-8
# /usr/bin/python3
import numpy as np
"""vanilla linear regression with gradient descent"""
class linReg():
def __init__(self, num_inputs):
self.w = np.random.rand(num_inputs, )
self.b = np.random.rand(1, )
def squared_loss(self, y_hat, y):
squared_err = (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
res = np.sqrt(np.mean(squared_err))
return res
def grad(self, X, y, W = None):
return np.array([
np.mean(X[:, 0] * np.mean(self.y_hat(X) - y)),
np.mean(X[:, 1] * np.mean(self.y_hat(X) - y)),
1 * np.mean(self.y_hat(X) - y)
])
def batchGD(self, grad, lr):
self.w -= (lr * grad)[:2]
self.b -= (lr * grad)[-1]
def y_hat(self, X):
return X @ self.w + self.b
def parameters(self):
return [self.w, self.b]
"""linear regression with momentum"""
class momentumLinreg(linReg):
def __init__(self, num_inputs):
super(momentumLinreg, self).__init__(num_inputs)
self.wv = np.random.rand(num_inputs, )
self.bv = np.random.rand(1, )
self.momentum = 0.5
def sgd_momentum(self, grad, lr):
# update momentum v
self.wv = self.wv * self.momentum + lr * grad[:2]
self.bv = self.bv * self.momentum + lr * grad[-1]
# update parameters
self.w -= self.wv
self.b -= self.bv
""" adagrad enable the param update with different learning rate """
class AdaGradLinreg(linReg):
def __init__(self, num_inputs):
super(AdaGradLinreg, self).__init__(num_inputs)
# according to linreg, grad is a vector with 3 dimension so
self.S = np.zeros(num_inputs+1)
def sgd_AdaGrad(self, grad, lr, sigma = 1E-6):
# update adagrad vector
self.S += grad ** 2
# update parameters
adagrad = (lr / np.sqrt(self.S + sigma)) * grad
self.w -= adagrad[:2]
self.b -= adagrad[-1]
"""RMSProp- little improvement for adaGrad, avoid too small learning rate """
class RMSPropLinreg(linReg):
def __init__(self, num_inputs):
super(RMSPropLinreg, self).__init__(num_inputs)
# according to linreg, grad is a vector with 3 dimension so
self.S1 = np.zeros(num_inputs)
self.S2 = np.zeros(1)
self.gama = 0.9
def sgd_RMSProp(self, grad, lr, sigma = 1E-6):
self.S1 = self.gama*self.S1 + ((1-self.gama)*grad**2)[:2]
self.S2 = self.gama*self.S2 + ((1-self.gama)*grad**2)[-1]
# update parameters
self.w -= (lr / np.sqrt(self.S1 + sigma)) * grad[:2]
self.b -= (lr / np.sqrt(self.S2 + sigma)) * grad[-1]
"""AdaDelta Solving the problem when it's hard to find global optimization"""
class AdaDeltaLinreg(linReg):
def __init__(self, num_inputs):
super(AdaDeltaLinreg, self).__init__(num_inputs)
self.S1 = np.zeros(2)
self.S2 = np.zeros(1)
self.delta = np.zeros(num_inputs+1)
def sgd_AdaDelta(self, grad, sigma = 1E-5, ro=0.9):
# update S
self.S1 = ro*self.S1 + ((1-ro)*grad**2)[:2]
self.S2 = ro*self.S2 + ((1-ro)*grad**2)[-1]
#fix grad
grad1 = np.sqrt((self.delta[:2]+sigma)/(self.S1+sigma)) * grad[:2]
grad2 = np.sqrt((self.delta[-1]+sigma)/(self.S2+sigma)) * grad[-1]
# update parameters
self.w -= grad1
self.b -= grad2
# upadte delta
self.delta = ro*self.delta + (1-ro)*np.concatenate([grad1, grad2])**2
"""Adam: RMSProp-Improvement for batch grad"""
class AdamLinreg(linReg):
def __init__(self, num_inputs):
super(AdamLinreg, self).__init__(num_inputs)
self.S = np.zeros(num_inputs+1)
self.V = np.zeros(num_inputs+1)
self.t = 1
def sgd_Adam(self, grad, lr, beta1=0.9, beta2=0.999, sigma=1E-6):
self.V = beta1*self.V + (1-beta1)*grad
self.S = beta2*self.S + (1-beta2) * grad**2
### bias fix
Vfix = self.V / (1- beta1**self.t)
Sfix = self.S / (1- beta2**self.t)
self.t += 1
# fix grad
grad = (lr*Vfix)/(np.sqrt(Sfix)+sigma) * grad
# update parameters
self.w -= grad[:2]
self.b -= grad[-1] | [
"[email protected]"
] | |
3a2edbe823a9eacb1fb0a386ff55ac1a2d103a78 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/common/Lib/unittest/test/test_result.py | 84c64afbe56d6189144f44facf68bad105d3b19e | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 15,163 | py | # 2016.11.19 20:01:20 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/unittest/test/test_result.py
import sys
import textwrap
from StringIO import StringIO
from test import test_support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
def test_init(self):
result = unittest.TestResult()
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 0)
self.assertEqual(result.shouldStop, False)
self.assertIsNone(result._stdout_buffer)
self.assertIsNone(result._stderr_buffer)
def test_stop(self):
result = unittest.TestResult()
result.stop()
self.assertEqual(result.shouldStop, True)
def test_startTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
def test_stopTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
def test_startTestRun_stopTestRun(self):
result = unittest.TestResult()
result.startTestRun()
result.stopTestRun()
def test_addSuccess(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
def test_addFailure(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
test.fail('foo')
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addFailure(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.failures[0]
self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
def test_addError(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
raise TypeError()
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addError(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
def testGetDescriptionWithoutDocstring(self):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(result.getDescription(self), 'testGetDescriptionWithoutDocstring (' + __name__ + '.Test_TestResult)')
return
@unittest.skipIf(sys.flags.optimize >= 2, 'Docstrings are omitted with -O2 and above')
def testGetDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(result.getDescription(self), 'testGetDescriptionWithOneLineDocstring (' + __name__ + '.Test_TestResult)\nTests getDescription() for a method with a docstring.')
return
@unittest.skipIf(sys.flags.optimize >= 2, 'Docstrings are omitted with -O2 and above')
def testGetDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(result.getDescription(self), 'testGetDescriptionWithMultiLineDocstring (' + __name__ + '.Test_TestResult)\nTests getDescription() for a method with a longer docstring.')
return
def testStackFrameTrimming(self):
class Frame(object):
class tb_frame(object):
f_globals = {}
result = unittest.TestResult()
self.assertFalse(result._is_relevant_tb_level(Frame))
Frame.tb_frame.f_globals['__unittest'] = True
self.assertTrue(result._is_relevant_tb_level(Frame))
def testFailFast(self):
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addError(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addFailure(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addUnexpectedSuccess(None)
self.assertTrue(result.shouldStop)
return
def testFailFastSetByRunner(self):
runner = unittest.TextTestRunner(stream=StringIO(), failfast=True)
def test(result):
self.assertTrue(result.failfast)
runner.run(test)
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip',
'addExpectedFailure',
'addUnexpectedSuccess',
'__init__'):
del classDict[m]
def __init__(self, stream = None, descriptions = None, verbosity = None):
self.failures = []
self.errors = []
self.testsRun = 0
self.shouldStop = False
self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
def assertOldResultWarning(self, test, failures):
with test_support.check_warnings(('TestResult has no add.+ method,', RuntimeWarning)):
result = OldResult()
test.run(result)
self.assertEqual(len(result.failures), failures)
def testOldTestResult(self):
class Test(unittest.TestCase):
def testSkip(self):
self.skipTest('foobar')
@unittest.expectedFailure
def testExpectedFail(self):
raise TypeError
@unittest.expectedFailure
def testUnexpectedSuccess(self):
pass
for test_name, should_pass in (('testSkip', True), ('testExpectedFail', True), ('testUnexpectedSuccess', False)):
test = Test(test_name)
self.assertOldResultWarning(test, int(not should_pass))
def testOldTestTesultSetup(self):
class Test(unittest.TestCase):
def setUp(self):
self.skipTest('no reason')
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldTestResultClass(self):
@unittest.skip('no reason')
class Test(unittest.TestCase):
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldResultWithRunner(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
runner = unittest.TextTestRunner(resultclass=OldResult, stream=StringIO())
runner.run(Test('testFoo'))
class MockTraceback(object):
@staticmethod
def format_exception(*_):
return ['A traceback']
def restore_traceback():
unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
def setUp(self):
self._real_out = sys.stdout
self._real_err = sys.stderr
def tearDown(self):
sys.stdout = self._real_out
sys.stderr = self._real_err
def testBufferOutputOff(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
def testBufferOutputStartTestAddSuccess(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
result.buffer = True
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIsNot(real_out, sys.stdout)
self.assertIsNot(real_err, sys.stderr)
self.assertIsInstance(sys.stdout, StringIO)
self.assertIsInstance(sys.stderr, StringIO)
self.assertIsNot(sys.stdout, sys.stderr)
out_stream = sys.stdout
err_stream = sys.stderr
result._original_stdout = StringIO()
result._original_stderr = StringIO()
print 'foo'
print >> sys.stderr, 'bar'
self.assertEqual(out_stream.getvalue(), 'foo\n')
self.assertEqual(err_stream.getvalue(), 'bar\n')
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
result.addSuccess(self)
result.stopTest(self)
self.assertIs(sys.stdout, result._original_stdout)
self.assertIs(sys.stderr, result._original_stderr)
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
self.assertEqual(out_stream.getvalue(), '')
self.assertEqual(err_stream.getvalue(), '')
def getStartedResult(self):
result = unittest.TestResult()
result.buffer = True
result.startTest(self)
return result
def testBufferOutputAddErrorOrFailure(self):
unittest.result.traceback = MockTraceback
self.addCleanup(restore_traceback)
for message_attr, add_attr, include_error in [('errors', 'addError', True),
('failures', 'addFailure', False),
('errors', 'addError', True),
('failures', 'addFailure', False)]:
result = self.getStartedResult()
buffered_out = sys.stdout
buffered_err = sys.stderr
result._original_stdout = StringIO()
result._original_stderr = StringIO()
print >> sys.stdout, 'foo'
if include_error:
print >> sys.stderr, 'bar'
addFunction = getattr(result, add_attr)
addFunction(self, (None, None, None))
result.stopTest(self)
result_list = getattr(result, message_attr)
self.assertEqual(len(result_list), 1)
test, message = result_list[0]
expectedOutMessage = textwrap.dedent('\n Stdout:\n foo\n ')
expectedErrMessage = ''
if include_error:
expectedErrMessage = textwrap.dedent('\n Stderr:\n bar\n ')
expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
self.assertIs(test, self)
self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
self.assertMultiLineEqual(message, expectedFullMessage)
return None
def testBufferSetupClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def setUpClass(cls):
1 // 0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def tearDownClass(cls):
1 // 0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferSetUpModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def setUpModule():
1 // 0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def tearDownModule():
1 // 0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
if __name__ == '__main__':
unittest.main()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\unittest\test\test_result.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 20:01:20 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
b020a784c22606793e17979f0014927a8ba9d5d7 | 0046667f824bc219fa369ee715c8a223cb729a74 | /hmm.py | 418583b938fc5a775459560d3fc5c2a280cca8e9 | [] | no_license | justttry/statisticallearning | 54933d7ee1ecad032ca3c0750812da8f07bf0dba | fef4d32ace845713b3701b1884056f6de3ce198d | refs/heads/master | 2021-01-17T04:54:59.989171 | 2017-02-27T08:30:38 | 2017-02-27T08:30:38 | 83,036,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,633 | py | #encoding:UTF-8
import unittest
from numpy import *
#----------------------------------------------------------------------
def calcForwardP(a, t, O, A, B):
"""
计算前向概率
Parameter:
t:t时刻
O:观测变量矩阵
a:t时的前向概率向量
A:状态转移概率矩阵
B:观测概率分布矩阵
Return:
a:t+1时的前向概率向量
"""
return multiply(A.T * a, B[:, O[0, t]])
#----------------------------------------------------------------------
def calcForwardPMat(O, A, B, pi0):
"""
计算前向概率矩阵
Parameter:
O:观测变量向量
A:状态转移概率矩阵
B:观测概率分布矩阵
pi0:初始状态概率向量
Return:
alphas:前向概率矩阵
"""
t = shape(O)[1]
n = shape(A)[1]
alpha = multiply(pi0, B[:, O[0, 0]])
alphas = mat(zeros((n, t)))
alphas[:, 0] = alpha
for i in range(1, t):
alpha = calcForwardP(alpha, i, O, A, B)
alphas[:, i] = alpha
return alphas
#----------------------------------------------------------------------
def calcForwardPMats(O, A, B, pi0):
"""
计算前向概率矩阵
Parameter:
O:观测变量矩阵
A:状态转移概率矩阵
B:观测概率分布矩阵
pi0:初始状态概率向量
Return:
alphas:前向概率矩阵
"""
s, t = shape(O)
n = shape(A)[1]
alphasMat = zeros((s, n, t))
for i in range(s):
alphas = calcForwardPMat(O, A, B, pi0)
alphasMat[i] = alphas
return alphasMat
#----------------------------------------------------------------------
def calcBackwardP(b, t, O, A, B):
"""
计算后向概率向量
Parameter:
b:t+1时刻的后向概率
t:时刻t
O:观测变量矩阵
A:隐马尔科夫模型状态转移矩阵
B:隐马尔科夫模型观测概率矩阵
Return:
b:t时刻的后向概率
"""
return A * multiply(b, B[:, O[0, t]])
#----------------------------------------------------------------------
def calcBackwardPMat(O, A, B, pi0):
"""
计算后向概率矩阵
Parameter:
O:观测变量向量
A:状态转移概率矩阵
B:观测概率分布矩阵
pi0:初始状态概率向量
Return:
betas:后向概率矩阵
"""
t = shape(O)[1]
n, m = shape(B)
beta = mat(ones((n, 1)))
betas = mat(zeros((n, t)))
betas[:, t-1] = beta
for i in range(t-1, 0, -1):
beta = calcBackwardP(beta, i, O, A, B)
betas[:, i-1] = beta
return betas
#----------------------------------------------------------------------
def calcBackwardPMats(O, A, B, pi0):
"""
计算后向概率矩阵
Parameter:
O:观测变量矩阵
A:状态转移概率矩阵
B:观测概率分布矩阵
pi0:初始状态概率向量
Return:
betasMat:多观测变量序列的后向概率矩阵
"""
s, t = shape(O)
n, m = shape(B)
betasMat = zeros((s, n, t))
for i in range(s):
betas = calcBackwardPMat(O, A, B, pi0)
betasMat[i] = betas
return betasMat
#----------------------------------------------------------------------
def calcGamma0(alpha, beta):
"""
计算gamma=P(i=qi|O, lamda)
Parameter:
alpha:t时刻的前向概率向量
beta:t时刻的后向概率向量
Return:
numerator:Gamma的分子
demoninator:Gamma的分母
"""
numerator = multiply(alpha, beta)
return numerator, sum(numerator)
#----------------------------------------------------------------------
def calcGamma(alpha, beta):
"""
计算gamma=P(i=qi|O, lamda)
Parameter:
alpha:t时刻的前向概率向量
beta:t时刻的后向概率向量
Return:
gamma:给定观测序列情况下,t时刻i状态出现的概率
"""
gamma = calcGamma0(alpha, beta)
return gamma[0] / gamma[1]
#----------------------------------------------------------------------
def calcGammaMat(alphas, betas):
"""
计算gamma分子矩阵,即P(it=qi, O|lamda)矩阵
Parameter:
alphas:前向概率矩阵
betas:后向概率矩阵
Return:
P(it=qi, O|lamda)矩阵
"""
return multiply(alphas, betas)
#----------------------------------------------------------------------
def calcGammaMats(alphasMat, betasMat):
"""
计算gamma分子矩阵,即P(it=qi, O|lamda)矩阵
Parameter:
alphasMat:前向概率矩阵
betasMat:后向概率矩阵
Return:
P(it=qi, O|lamda)矩阵
"""
return multiply(alphasMat, betasMat)
#----------------------------------------------------------------------
def calcEpsilon0(alpha, beta, A, B, O, t):
"""
计算Epsilon=P(i=qi, j=qj|O, lamda)
Parameter:
alpha:t时刻的前向概率向量
beta:t+1时刻的后向概率向量
A:隐马尔可夫模型状态转移矩阵
B:隐马尔可夫模型概率分布矩阵
O:观测序列
t:时刻t
Return:
numerator:Epsilon的分子
demoninator:Epsilon的分母
"""
b = B[:, O[0, t]]
result = multiply(multiply(alpha, A), multiply(b, beta).T)
return result, sum(result)
#----------------------------------------------------------------------
def calcEpsilon(alpha, beta, A, B, O, t):
"""
计算Epsilon=P(i=qi, j=qj|O, lamda)
Parameter:
alpha:t时刻的前向概率向量
beta:t+1时刻的后向概率向量
A:隐马尔可夫模型状态转移矩阵
B:隐马尔可夫模型概率分布矩阵
O:观测序列
t:时刻t
Return:
epsilon:给定观测序列的情况下,t时刻为i状态,t+1时刻为j状态的概率
"""
epsilon = calcEpsilon0(alpha, beta, A, B, O, t)
return epsilon[0] / epsilon[1]
#----------------------------------------------------------------------
def calcEpsilonMat(alphas, betas, A, B, O):
"""
计算Epsilon分子矩阵,即P(it=qi, it_1=qj, O| lamda)矩阵
Parameter:
alphas:前向概率矩阵
betas:后向概率矩阵
A:隐马尔可夫模型状态转移矩阵
B:隐马尔可夫模型概率分布矩阵
O:观测序列向量
Return:
P(it=qi, it_1=qj, O| lamda)矩阵
"""
t = shape(O)[1]
n, m = shape(B)
epsilons = zeros((t-1, n, n))
for i in range(1, t):
epsilon = multiply(multiply(alphas[:, i-1], A),
multiply(B[:, O[0, i]], betas[:, i]).T)
epsilons[i-1] = epsilon
return epsilons
#----------------------------------------------------------------------
def calcEpsilonMats(alphasMat, betasMat, A, B, O):
"""
计算Epsilon分子矩阵,即P(it=qi, it_1=qj, O| lamda)矩阵
Parameter:
alphasMat:前向概率矩阵
betasMat:后向概率矩阵
A:隐马尔可夫模型状态转移矩阵
B:隐马尔可夫模型概率分布矩阵
O:观测序列矩阵
Return:
P(it=qi, it_1=qj, O| lamda)矩阵
"""
s, t = shape(O)
n, m = shape(B)
epsilonsMat = zeros((s, t-1, n, n))
for i in range(s):
epsilons = calcEpsilonMat(mat(alphasMat[i]), mat(betasMat[i]), A, B, O[i])
epsilonsMat[i] = epsilons
return epsilonsMat
#----------------------------------------------------------------------
def forwardAlgo(pi0, A, B, O):
"""
前向算法
pi0:隐马尔可夫模型的初始状态概率向量
A:隐马尔可夫模型状态转移概率矩阵
B:隐马尔可夫模型观测概率分布矩阵
O:观测序列
Return:
p:观测序列的概率
"""
#计算观测次数
t = shape(O)[1]
#计算前向概率的初值
a = multiply(pi0, B[:, O[0, 0]])
for i in range(1, t):
a = calcForwardP(a, i, O, A, B)
return sum(a)
#----------------------------------------------------------------------
def backwardAlgo(O, A, B, pi0):
"""
后向算法
Parameter:
A:隐马尔科夫模型状态转移矩阵
B:隐马尔科夫模型观测概率矩阵
O:观测变量矩阵
pi0:初始概率分布
Return:
观测序列的概率
"""
O = mat(O)
A = mat(A)
B = mat(B)
pi0 = mat(pi0)
n = shape(A)[0]
t = shape(O)[1]
#初始化后向概率
b = mat(ones((n, 1)))
#计算b1(i)
for i in range(t-1, 0, -1):
b = calcBackwardP(b, i, O, A, B)
#返回P(O|lamda)
return pi0.T * multiply(b, B[:, O[0, 0]])
#----------------------------------------------------------------------
def calcPi0(gammaMat):
"""
根据状态条件概率分布计算pi0
Parameter:
gammaMat:状态条件概率分布
Return:
pi0:隐马尔可夫模型初始概率
"""
tmp = mat(sum(gammaMat, axis=0)[:, 0]).T
return tmp / sum(tmp)
#----------------------------------------------------------------------
def calcA(epsilonMat):
"""
根据联合条件概率分布计算A
Parameter:
epsilonMat:联合条件概率分布
Return:
A:状态转移概率矩阵
"""
tmp = sum(epsilonMat, axis=0)
tmp = sum(tmp, axis=0)
sums = sum(tmp, axis=0)
return mat(tmp / sums)
#----------------------------------------------------------------------
def calcI(O, m):
"""
计算指示函数I(ot=vk)
Parameter:
O:观测变量
m:变量可能的取值数
Return:
I:指示函数I(ot=vk)
"""
t = shape(O)[1]
I = zeros((t, m))
#当ot==vk时,I[t, k] = 1
#O中元素的坐标(时刻)和值确定I中的横纵坐标
I[[range(t), O]] = 1
return mat(I)
#----------------------------------------------------------------------
def calcIs(O, m):
"""
计算指示函数集
Parameter:
O:观测变量矩阵
m:变量可能的取值数
Return:
Is:指示函数集
"""
s, t = shape(O)
Is = zeros((s, t, m))
for i in range(s):
instr = calcI(O[i], m)
Is[i] = instr
return Is
#----------------------------------------------------------------------
def calcB(gammaMat, O, m):
"""
根据状态条件概率分布计算B
Parameter:
gammaMat:状态条件概率分布矩阵
O:观测变量矩阵
m:观测变量的可能取值数
Return:
B:观测概率分布矩阵
"""
s, n, t = shape(gammaMat)
#计算指示函数矩阵
Is = calcIs(O, m)
numeratorMat = zeros((s, n, m))
denominatorMat = zeros((s, n, m))
I = ones((t, m))
for i in range(s):
numerator = dot(gammaMat[i], Is[i])
denominator = dot(gammaMat[i], I)
numeratorMat[i] = numerator
denominatorMat[i] = denominator
numer = sum(numeratorMat, axis=0)
denom = sum(denominatorMat, axis=0)
return mat(numer / denom)
#----------------------------------------------------------------------
def BaumWelch(O, N, M, theta=1e-7):
"""
Baum-Welch算法
Parameter:
O:观测数据矩阵
N:可能的隐状态数
M:可能的观测数
Return:
A:隐马尔可夫模型状态转移矩阵
B:隐马尔可夫模型概率分布矩阵
pi0:隐马尔可夫模型初始概率
"""
#选取模型初值
A = mat(random.random((N, N)))
A = A / sum(A, axis=0)
B = mat(random.random((N, M)))
B = B / sum(B, axis=0)
pi0 = mat(random.random((N, 1)))
pi0 = pi0 / sum(pi0, axis=0)
error = sum(abs(A))
cnt = 0
#停止条件
while error > theta:
#计算前向概率矩阵
alphasMat = calcForwardPMats(O, A, B, pi0)
#计算后向概率矩阵
betasMat = calcBackwardPMats(O, A, B, pi0)
#计算状态条件概率矩阵
gammaMat = calcGammaMats(alphasMat, betasMat)
#计算联合条件概率矩阵
epsilonMat = calcEpsilonMats(alphasMat, betasMat, A, B, O)
#更新pi0
pi0 = calcPi0(gammaMat)
#更新A
newA = calcA(epsilonMat)
#更新B
B = calcB(gammaMat, O, M)
#计算error
error = sum(abs(A - newA))
A = newA
#验证,计算P(O|lamda)并打印
prob = 1.0
for i in O:
prob *= forwardAlgo(pi0, A, B, i)
print '-----start------------'
print 'prob: ', prob
print 'cnt: ', cnt
print 'error:', error
cnt += 1
return A, B, pi0
########################################################################
class HMMTest(unittest.TestCase):
""""""
#----------------------------------------------------------------------
def test_0(self):
A = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
B = [[1.1, 2.1, 3.1, 4.1],
[5.1, 6.1, 7.1, 8.1],
[9.1, 10.1, 11.1, 12.1]]
a = [[1.2], [2.2], [3.2]]
t1 = 3
O = [[0, 1, 2, 3]]
tmp = [[1 * 1.2 + 4 * 2.2 + 7 * 3.2],
[2 * 1.2 + 5 * 2.2 + 8 * 3.2],
[3 * 1.2 + 6 * 2.2 + 9 * 3.2]]
result = [[tmp[0][0] * B[0][t1]],
[tmp[1][0] * B[1][t1]],
[tmp[2][0] * B[2][t1]]]
self.assertListEqual(calcForwardP(mat(a),
t1,
mat(O),
mat(A),
mat(B)).tolist(),
result)
#----------------------------------------------------------------------
def test_1(self):
""""""
A = [[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]]
B = [[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]]
pi0 = [[0.2], [0.4], [0.4]]
O = [[0, 1, 0]]
self.assertAlmostEqual(forwardAlgo(mat(pi0),
mat(A),
mat(B),
mat(O)),
0.13022, delta=0.00001)
#----------------------------------------------------------------------
def test_2(self):
A = [[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]]
B = [[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]]
pi0 = [[0.2], [0.4], [0.4]]
b = [[1], [1], [1]]
O = [[0, 1, 0]]
tmp = [[b[0][0]*B[0][0], b[0][0]*B[0][1]],
[b[1][0]*B[1][0], b[1][0]*B[1][1]],
[b[2][0]*B[2][0], b[2][0]*B[2][1]]]
result = [[A[0][0]*tmp[0][0] + A[0][1]*tmp[1][0] + A[0][2]*tmp[2][0]],
[A[1][0]*tmp[0][0] + A[1][1]*tmp[1][0] + A[1][2]*tmp[2][0]],
[A[2][0]*tmp[0][0] + A[2][1]*tmp[1][0] + A[2][2]*tmp[2][0]]]
b = calcBackwardP(mat(b),
2,
mat(O),
mat(A),
mat(B))
self.assertListEqual(b.tolist(), result)
#----------------------------------------------------------------------
def test_3(self):
""""""
A = [[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]]
B = [[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]]
pi0 = [[0.2], [0.4], [0.4]]
O = [[0, 1, 0]]
prob = backwardAlgo(O, A, B, pi0)
self.assertAlmostEqual(prob[0, 0], 0.13022, delta=0.00001)
#----------------------------------------------------------------------
def test_calcGamma(self):
""""""
alpha = mat([[1], [2], [3]])
beta = mat([[4], [5], [6]])
result = mat([[alpha[0, 0]*beta[0, 0]],
[alpha[1, 0]*beta[1, 0]],
[alpha[2, 0]*beta[2, 0]]])
result = result / sum(result)
self.assertListEqual(calcGamma(alpha, beta).tolist(),
result.tolist())
#----------------------------------------------------------------------
def test_calcEpsilon(self):
""""""
A = mat([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
B = mat([[10.0, 11.0],
[12.0, 13.0],
[14.0, 15.0]])
O = mat([[0, 1, 0, 1, 0]])
t = 4
alpha = mat([[16.0],
[17.0],
[18.0]])
beta = mat([[19.0],
[20.0],
[21.0]])
b = mat([[10.0],
[12.0],
[14.0]])
e11 = alpha[0, 0] * A[0, 0] * b[0, 0] * beta[0, 0]
e12 = alpha[0, 0] * A[0, 1] * b[1, 0] * beta[1, 0]
e13 = alpha[0, 0] * A[0, 2] * b[2, 0] * beta[2, 0]
e21 = alpha[1, 0] * A[1, 0] * b[0, 0] * beta[0, 0]
e22 = alpha[1, 0] * A[1, 1] * b[1, 0] * beta[1, 0]
e23 = alpha[1, 0] * A[1, 2] * b[2, 0] * beta[2, 0]
e31 = alpha[2, 0] * A[2, 0] * b[0, 0] * beta[0, 0]
e32 = alpha[2, 0] * A[2, 1] * b[1, 0] * beta[1, 0]
e33 = alpha[2, 0] * A[2, 2] * b[2, 0] * beta[2, 0]
e = mat([[e11, e12, e13],
[e21, e22, e23],
[e31, e32, e33]])
e = e / (e11 + e12 + e13 + e21 + e22 + e23 + e31 + e32 + e33)
result = calcEpsilon(alpha, beta, A, B, O, t)
self.assertListEqual(e.tolist(),
result.tolist())
#----------------------------------------------------------------------
def test_calcForwardPMat(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0]])
alphas = mat(zeros((3, 3)))
alpha = multiply(pi0, B[:, O[0, 0]])
alphas[:, 0] = alpha
for i in range(1, 3):
alpha = calcForwardP(alpha, i, O, A, B)
alphas[:, i] = alpha
newalphas = calcForwardPMat(O, A, B, pi0)
self.assertListEqual(alphas.tolist(), newalphas.tolist())
#----------------------------------------------------------------------
def test_calcForwardPMats(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0],
[0, 1, 0]])
alphas = mat(zeros((3, 3)))
alpha = multiply(pi0, B[:, O[0, 0]])
alphas[:, 0] = alpha
for i in range(1, 3):
alpha = calcForwardP(alpha, i, O, A, B)
alphas[:, i] = alpha
newalphas = calcForwardPMats(O, A, B, pi0)
self.assertListEqual([alphas.tolist(), alphas.tolist()], newalphas.tolist())
#----------------------------------------------------------------------
def test_calcBackwardPMat(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0]])
betas = calcBackwardPMat(O, A, B, pi0)
result = multiply(pi0, B[:, O[0, 0]]).T * betas[:, 0]
self.assertAlmostEqual(result[0, 0], 0.13022, delta=0.00001)
alphas = calcForwardPMat(O, A, B, pi0)
self.assertEqual(result[0, 0], sum(alphas, axis=0)[0, shape(O)[1]-1])
#----------------------------------------------------------------------
def test_calcBackwardPMats(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0],
[0, 1, 0],
[0, 1, 0]])
n, m = shape(B)
s, t = shape(O)
betas = calcBackwardPMat(O, A, B, pi0)
betasMat = calcBackwardPMats(O, A, B, pi0)
self.assertEqual(betasMat.tolist(), [betas.tolist()]*s)
#----------------------------------------------------------------------
def test_calcGammaMat(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0]])
alphas = calcForwardPMat(O, A, B, pi0)
betas = calcBackwardPMat(O, A, B, pi0)
t = shape(O)[1]
n = shape(A)[1]
gammas0 = mat(zeros((n, t)))
for i in range(1, t+1):
gamma = multiply(alphas[:, i-1], betas[:, i-1])
gammas0[:, i-1] = gamma
gammas = calcGammaMat(alphas, betas)
self.assertListEqual(gammas0.tolist(), gammas.tolist())
#----------------------------------------------------------------------
def test_calcGammaMats(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0]])
O1 = mat([[0, 1, 0],
[0, 1, 0]])
alphas = calcForwardPMat(O, A, B, pi0)
betas = calcBackwardPMat(O, A, B, pi0)
t = shape(O)[1]
n = shape(A)[1]
gammas0 = mat(zeros((n, t)))
for i in range(1, t+1):
gamma = multiply(alphas[:, i-1], betas[:, i-1])
gammas0[:, i-1] = gamma
gammas = calcGammaMat(alphas, betas)
self.assertListEqual(gammas0.tolist(), gammas.tolist())
alphasMat = calcForwardPMats(O1, A, B, pi0)
betasMat = calcBackwardPMats(O1, A, B, pi0)
gammasMat = calcGammaMats(alphasMat, betasMat)
self.assertListEqual(gammasMat.tolist(), [gammas.tolist()]*2)
#----------------------------------------------------------------------
def test_calcEpsilonMat(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0]])
alphas = calcForwardPMat(O, A, B, pi0)
betas = calcBackwardPMat(O, A, B, pi0)
t = shape(O)[1]
n, m = shape(B)
epsilons0 = zeros((t-1, n, n))
for i in range(1, t):
epsilon = calcEpsilon0(alphas[:, i-1],
betas[:, i],
A,
B,
O,
i)[0]
epsilons0[i-1] = epsilon
epsilons = calcEpsilonMat(alphas, betas, A, B, O)
self.assertListEqual(epsilons.tolist(), epsilons0.tolist())
#----------------------------------------------------------------------
def test_calcEpsilonMats(self):
""""""
A = mat([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = mat([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
pi0 = mat([[0.2], [0.4], [0.4]])
O = mat([[0, 1, 0]])
alphas = calcForwardPMat(O, A, B, pi0)
betas = calcBackwardPMat(O, A, B, pi0)
t = shape(O)[1]
n, m = shape(B)
epsilons0 = zeros((t-1, n, n))
for i in range(1, t):
epsilon = calcEpsilon0(alphas[:, i-1],
betas[:, i],
A,
B,
O,
i)[0]
epsilons0[i-1] = epsilon
epsilons = calcEpsilonMat(alphas, betas, A, B, O)
self.assertListEqual(epsilons.tolist(), epsilons0.tolist())
O1 = mat([[0, 1, 0],
[0, 1, 0],
[0, 1, 0]])
alphasMat = calcForwardPMats(O1, A, B, pi0)
betasMat = calcBackwardPMats(O1, A, B, pi0)
epsilonsMat = calcEpsilonMats(alphasMat, betasMat, A, B, O1)
self.assertListEqual(epsilonsMat.tolist(),
[epsilons.tolist()]*3)
#----------------------------------------------------------------------
def test_calcPi0(self):
""""""
s = 2
n = 3
t = 4
gammas = random.random((s, n, t))
gammas0 = gammas[0, 0, 0] + gammas[1, 0, 0]
gammas1 = gammas[0, 1, 0] + gammas[1, 1, 0]
gammas2 = gammas[0, 2, 0] + gammas[1, 2, 0]
pi0 = mat([[gammas0, gammas1, gammas2]]).T / (gammas0+gammas1+gammas2)
newpi = calcPi0(gammas)
self.assertListEqual(pi0.tolist(), newpi.tolist())
#----------------------------------------------------------------------
def test_calcA(self):
""""""
s = 2
t = 2
n = 3
eps = random.random((2, 2, 3, 3))
a11 = eps[0, 0, 0, 0] + eps[1, 0, 0, 0] + eps[0, 1, 0, 0] + eps[1, 1, 0, 0]
a12 = eps[0, 0, 0, 1] + eps[1, 0, 0, 1] + eps[0, 1, 0, 1] + eps[1, 1, 0, 1]
a13 = eps[0, 0, 0, 2] + eps[1, 0, 0, 2] + eps[0, 1, 0, 2] + eps[1, 1, 0, 2]
a21 = eps[0, 0, 1, 0] + eps[1, 0, 1, 0] + eps[0, 1, 1, 0] + eps[1, 1, 1, 0]
a22 = eps[0, 0, 1, 1] + eps[1, 0, 1, 1] + eps[0, 1, 1, 1] + eps[1, 1, 1, 1]
a23 = eps[0, 0, 1, 2] + eps[1, 0, 1, 2] + eps[0, 1, 1, 2] + eps[1, 1, 1, 2]
a31 = eps[0, 0, 2, 0] + eps[1, 0, 2, 0] + eps[0, 1, 2, 0] + eps[1, 1, 2, 0]
a32 = eps[0, 0, 2, 1] + eps[1, 0, 2, 1] + eps[0, 1, 2, 1] + eps[1, 1, 2, 1]
a33 = eps[0, 0, 2, 2] + eps[1, 0, 2, 2] + eps[0, 1, 2, 2] + eps[1, 1, 2, 2]
result = mat([[a11, a12, a13], [a21, a22, a23], [a31, a32, a33]])
result = result / mat([a11+a21+a31, a12+a22+a32, a13+a23+a33])
A = calcA(eps)
#self.assertListEqual(result.tolist(), A.tolist())
for i in range(n):
for j in range(n):
self.assertAlmostEqual(result[i, j], A[i, j], delta=0.000001)
#----------------------------------------------------------------------
def test_calcI(self):
""""""
#om = 0, 1, 2, 3
m = 4
O = mat([[1, 2, 3, 2, 1]])
result = mat([[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]])
self.assertListEqual(calcI(O, m).tolist(), result.tolist())
#----------------------------------------------------------------------
def test_calcIs(self):
""""""
#om = 0, 1, 2, 3
m = 4
O = mat([[1, 2, 3, 2, 1],
[2, 3, 1, 0, 0]])
result0 = [[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]]
result1 = [[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]]
result = array([result0, result1])
self.assertListEqual(calcIs(O, m).tolist(), result.tolist())
#----------------------------------------------------------------------
def test_calcB(self):
""""""
gammaMat = random.random((2, 3, 4))
s, n, t = shape(gammaMat)
m = 2
O = mat([[0, 1, 1, 0],
[1, 0, 0, 0]])
n111 = gammaMat[0, 0, 0] + gammaMat[0, 0, 3]
n112 = gammaMat[0, 0, 1] + gammaMat[0, 0, 2]
n121 = gammaMat[0, 1, 0] + gammaMat[0, 1, 3]
n122 = gammaMat[0, 1, 1] + gammaMat[0, 1, 2]
n131 = gammaMat[0, 2, 0] + gammaMat[0, 2, 3]
n132 = gammaMat[0, 2, 1] + gammaMat[0, 2, 2]
n211 = gammaMat[1, 0, 1] + gammaMat[1, 0, 2] + gammaMat[1, 0, 3]
n212 = gammaMat[1, 0, 0]
n221 = gammaMat[1, 1, 1] + gammaMat[1, 1, 2] + gammaMat[1, 1, 3]
n222 = gammaMat[1, 1, 0]
n231 = gammaMat[1, 2, 1] + gammaMat[1, 2, 2] + gammaMat[1, 2, 3]
n232 = gammaMat[1, 2, 0]
n11 = n111 + n211
n12 = n112 + n212
n21 = n121 + n221
n22 = n122 + n222
n31 = n131 + n231
n32 = n132 + n232
n = array([[n11, n12],
[n21, n22],
[n31, n32]])
d111 = gammaMat[0, 0, 0] + gammaMat[0, 0, 1] + gammaMat[0, 0, 2] + gammaMat[0, 0, 3]
d112 = gammaMat[0, 0, 0] + gammaMat[0, 0, 1] + gammaMat[0, 0, 2] + gammaMat[0, 0, 3]
d121 = gammaMat[0, 1, 0] + gammaMat[0, 1, 1] + gammaMat[0, 1, 2] + gammaMat[0, 1, 3]
d122 = gammaMat[0, 1, 0] + gammaMat[0, 1, 1] + gammaMat[0, 1, 2] + gammaMat[0, 1, 3]
d131 = gammaMat[0, 2, 0] + gammaMat[0, 2, 1] + gammaMat[0, 2, 2] + gammaMat[0, 2, 3]
d132 = gammaMat[0, 2, 0] + gammaMat[0, 2, 1] + gammaMat[0, 2, 2] + gammaMat[0, 2, 3]
d211 = gammaMat[1, 0, 0] + gammaMat[1, 0, 1] + gammaMat[1, 0, 2] + gammaMat[1, 0, 3]
d212 = gammaMat[1, 0, 0] + gammaMat[1, 0, 1] + gammaMat[1, 0, 2] + gammaMat[1, 0, 3]
d221 = gammaMat[1, 1, 0] + gammaMat[1, 1, 1] + gammaMat[1, 1, 2] + gammaMat[1, 1, 3]
d222 = gammaMat[1, 1, 0] + gammaMat[1, 1, 1] + gammaMat[1, 1, 2] + gammaMat[1, 1, 3]
d231 = gammaMat[1, 2, 0] + gammaMat[1, 2, 1] + gammaMat[1, 2, 2] + gammaMat[1, 2, 3]
d232 = gammaMat[1, 2, 0] + gammaMat[1, 2, 1] + gammaMat[1, 2, 2] + gammaMat[1, 2, 3]
d11 = d111 + d211
d12 = d112 + d212
d21 = d121 + d221
d22 = d122 + d222
d31 = d131 + d231
d32 = d132 + d232
d = array([[d11, d12],
[d21, d22],
[d31, d32]])
result = n / d
self.assertListEqual(calcB(gammaMat, O, m).tolist(), result.tolist())
#----------------------------------------------------------------------
def test_BaumWelch(self):
""""""
#O = mat([[1, 2, 3, 3, 2, 1, 0, 3],
#[0, 1, 2, 0, 2, 3, 1, 0]])
O = mat(random.choice(range(4), (1000, 8)))
M = 4
N = 5
print BaumWelch(O, N, M)
#----------------------------------------------------------------------
def suite():
""""""
suite = unittest.TestSuite()
suite.addTest(HMMTest('test_0'))
suite.addTest(HMMTest('test_1'))
suite.addTest(HMMTest('test_2'))
suite.addTest(HMMTest('test_3'))
suite.addTest(HMMTest('test_calcGamma'))
suite.addTest(HMMTest('test_calcEpsilon'))
suite.addTest(HMMTest('test_calcForwardPMat'))
suite.addTest(HMMTest('test_calcForwardPMats'))
suite.addTest(HMMTest('test_calcBackwardPMat'))
suite.addTest(HMMTest('test_calcBackwardPMats'))
suite.addTest(HMMTest('test_calcGammaMat'))
suite.addTest(HMMTest('test_calcGammaMats'))
suite.addTest(HMMTest('test_calcEpsilonMat'))
suite.addTest(HMMTest('test_calcEpsilonMats'))
suite.addTest(HMMTest('test_calcPi0'))
suite.addTest(HMMTest('test_calcA'))
suite.addTest(HMMTest('test_calcI'))
suite.addTest(HMMTest('test_calcIs'))
suite.addTest(HMMTest('test_calcB'))
suite.addTest(HMMTest('test_BaumWelch'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite') | [
"[email protected]"
] | |
8a9bb8495e53836929e9fd749a53a604f9cec9a4 | 43fac2df4893f5b5448dd64f3c9e59ddebca7efe | /andromap/polytools.py | d8d3e5933a73f9d6a0e4bb11d12400f27bf2b6e7 | [
"BSD-3-Clause"
] | permissive | jonathansick/andromap | 8a159eb7a85ab5ea7602cdff1755ba411ef38d97 | b8905a39e0a45c7d803e45ce9a78c64ecac00bee | refs/heads/master | 2021-01-18T23:01:36.996791 | 2017-01-27T05:32:06 | 2017-01-27T05:32:06 | 14,810,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Tools for working with polygons through shapely
2013-12-11 - Created by Jonathan Sick
"""
from shapely.geometry import Polygon, MultiPolygon
from shapely.ops import cascaded_union
import numpy as np
def close_vertices(polygon):
"""Make the last vertex the same as the first."""
polygon.append(polygon[0])
return polygon
def polygon_union(polygons):
"""Make the union of polygons. Returns a list of all isolated polygon
unions."""
shapely_polys = [Polygon(p) for p in polygons]
multipoly = MultiPolygon(shapely_polys)
u = cascaded_union(multipoly)
if isinstance(u, MultiPolygon):
vert_seq = []
for p in u:
vert_seq.append(np.array(p.exterior.coords[:]))
return vert_seq
else:
return [np.array(u.exterior.coords[:])]
| [
"[email protected]"
] | |
3a5c2d05027ad0b4d276f391d8f18fc7563905d0 | ba0731b2dbc4c1529eaaa79811ec15754c19b4cd | /extractors/refextract/extract/routes.py | da9b4cb042c0fb5305c062073e04e91f29db71c1 | [
"MIT"
] | permissive | arXiv/arxiv-references | 35f87084cf91947c572faf1a86f119b308fada66 | a755aeaa864ff807ff16ae2c3960f9fee54d8dd8 | refs/heads/master | 2022-12-21T02:34:57.166298 | 2018-05-04T20:30:48 | 2018-05-04T20:30:48 | 94,906,433 | 8 | 6 | MIT | 2022-12-08T02:06:20 | 2017-06-20T15:26:25 | Python | UTF-8 | Python | false | false | 2,709 | py | """HTTP routes for refextract API."""
import os
from refextract import extract_references_from_file
from flask.json import jsonify
from flask import Blueprint, request, current_app
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
import logging
HTTP_200_OK = 200
HTTP_400_BAD_REQUEST = 400
HTTP_500_INTERNAL_SERVER_ERROR = 500
blueprint = Blueprint('refextract', __name__, url_prefix='/refextract')
def getLogger():
"""Create a logger based on application configuration."""
default_format = '%(asctime)s - %(name)s - %(levelname)s: %(message)s'
try:
log_level = int(current_app.config.get('LOGLEVEL', logging.INFO))
log_format = current_app.config.get('LOGFORMAT', default_format)
log_file = current_app.config.get('LOGFILE')
except AttributeError:
log_level = logging.INFO
log_format = default_format
log_file = None
logging.basicConfig(format=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
if log_file is not None:
logger.addHandler(logging.FileHandler(log_file))
return logger
def handle_upload(uploaded_file: FileStorage) -> str:
"""Store an uploaded file."""
filename = secure_filename(uploaded_file.filename)
if not filename.endswith('.pdf'):
raise ValueError('Unsupported file type')
filepath = os.path.join(current_app.config['UPLOAD_PATH'], filename)
uploaded_file.save(filepath)
return filepath
def cleanup_upload(filepath: str) -> None:
"""Remove uploaded file."""
if os.path.exists(filepath):
os.remove(filepath)
return
@blueprint.route('/status', methods=['GET'])
def status() -> tuple:
"""Health check endpoint."""
return jsonify({'iam': 'ok'}), HTTP_200_OK
@blueprint.route('/extract', methods=['POST'])
def extract() -> tuple:
"""Handle a request for reference extraction for a POSTed PDF."""
logger = getLogger()
if 'file' not in request.files:
return jsonify({'explanation': 'No file found'}), HTTP_400_BAD_REQUEST
try:
filepath = handle_upload(request.files['file'])
except ValueError as e:
return jsonify({'explanation': e.msg}), HTTP_400_BAD_REQUEST
try:
response_data = extract_references_from_file(filepath)
status = HTTP_200_OK
except Exception as e:
response_data = {'explanation': 'refextract failed: %s' % e}
status = HTTP_500_INTERNAL_SERVER_ERROR
finally:
try:
cleanup_upload(filepath)
except IOError as e:
logger.warning('Could not remove file %s: %s' % filepath, e)
return jsonify(response_data), status
| [
"[email protected]"
] | |
ca14cfcc4f0020bf14072b28b13b5efd0c0a1140 | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/real-life/test_sof_example.py | 67de4d5643861c429a1c60920db8424086e6352e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import datetime
#get_ipython().magic('matplotlib inline')
trainfile = "data/sof_example.csv";
df = pd.read_csv(trainfile, sep=r',', engine='python', skiprows=0);
df['Date'] = df['Date'].apply(lambda x : datetime.datetime.strptime(x, "%m/%d/%Y"))
print(df.head());
lDateVar = 'Date'
lSignalVar = 'Used'
lEngine = autof.cForecastEngine()
lEngine
H = 10;
#lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.train(df , lDateVar , lSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standrdPlots("outputs/sof_example");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/sof_example_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[lDateVar , lSignalVar,
lSignalVar + '_Forecast' ,
lSignalVar + '_Forecast_Lower_Bound',
lSignalVar + '_Forecast_Upper_Bound']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(2*H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| [
"[email protected]"
] | |
a1eb66dbe95cb27bee4d6db88aae787c660163b8 | 5b49b2f8934e21445757a36174d924bb713ad810 | /elvers/rules/rcorrector/rcorrector-wrapper.py | d38953bd8abd25a4ea0521807acf6cc1283d78a0 | [
"BSD-3-Clause"
] | permissive | maligang/elvers | bf982442d08411a7081f186c7b76ac09035b0466 | b2dab8092351f6db13c437c89004b897fb8a40d7 | refs/heads/master | 2022-12-11T12:45:06.905102 | 2020-08-05T17:09:50 | 2020-08-05T17:09:50 | 290,427,388 | 0 | 0 | NOASSERTION | 2020-08-26T07:30:10 | 2020-08-26T07:30:09 | null | UTF-8 | Python | false | false | 2,509 | py | __author__ = "N .Tessa Pierce"
__copyright__ = "Copyright 2019, N. Tessa Pierce"
__email__ = "[email protected]"
__license__ = "MIT"
from os import path
from snakemake.shell import shell
extra = snakemake.params.get("extra", "")
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
outdir = path.dirname(snakemake.output.get('r1'))
r1 = snakemake.input.get("r1")
r2 = snakemake.input.get("r2")
r = snakemake.input.get("r")
def move_files(outdir, in_list, out_list):
for f, o in zip(in_list, out_list):
f = path.join(outdir, f)
shell("cp {f} {o}")
shell("rm -f {f}")
def build_default_outname(infile):
# Rcorrector outputs gzipped files IF input files are gzipped
end = '.gz' if infile.endswith('.gz') else ''
return(path.basename(infile.rsplit('.f')[0]) + '.cor.fq' + end)
assert (r1 is not None and r2 is not None) or r is not None, "either r1 and r2 (paired), or r (unpaired) are required as input"
if r1:
# handle inputs
r1 = [snakemake.input.r1] if isinstance(snakemake.input.r1, str) else snakemake.input.r1
r2 = [snakemake.input.r2] if isinstance(snakemake.input.r2, str) else snakemake.input.r2
assert len(r1) == len(r2), "input-> equal number of files required for r1 and r2"
r1_cmd = ' -1 ' + ",".join(r1)
r2_cmd = ' -2 ' + ",".join(r2)
read_cmd = " ".join([r1_cmd,r2_cmd])
# handle outputs
r1_out = [snakemake.output.r1] if isinstance(snakemake.output.r1, str) else snakemake.output.r1
r2_out = [snakemake.output.r2] if isinstance(snakemake.output.r2, str) else snakemake.output.r2
r1_default, r2_default = [], []
for f in r1:
r1_default+= [build_default_outname(f)]
for f in r2:
r2_default+= [build_default_outname(f)]
if r:
# handle inputs
assert r1 is None and r2 is None, "cannot handle mixed paired/unpaired input files. Please input either r1,r2 (paired) or r (unpaired)"
r = [snakemake.input.r] if isinstance(snakemake.input.r, str) else snakemake.input.r
read_cmd = ' -r ' + ",".join(r)
# handle outputs
r_out = [snakemake.output.r] if isinstance(snakemake.output.r, str) else snakemake.output.r
r_default = []
for f in r:
r_default += [build_default_outname(f)]
shell("run_rcorrector.pl {read_cmd} -od {outdir} {snakemake.params.extra} -t {snakemake.threads} {log}")
if r1_default:
move_files(outdir, r1_default, r1_out)
move_files(outdir, r2_default, r2_out)
elif r_default:
move_files(outdir, r_default, r_out)
| [
"[email protected]"
] | |
1181b639354080f4efa652e95729190cdfb5fc52 | 0ead11cb6ff33e71903b49de3b8a0b0a273c4ece | /python/smqtk/representation/descriptor_element/_io.py | 3ea08361516ddbde7d31bbd476d304d15b742f3b | [] | no_license | Erotemic/SMQTK | b1ad8838516c88ce464ce9069c22a5b79e6097e7 | 0153a7bab694b84620e4a6ca9127d8cc8de52952 | refs/heads/master | 2021-04-27T09:09:48.244303 | 2018-02-06T20:43:11 | 2018-02-06T20:43:11 | 122,508,206 | 0 | 0 | null | 2018-02-22T16:59:35 | 2018-02-22T16:59:35 | null | UTF-8 | Python | false | false | 12,044 | py | import logging
import multiprocessing
import multiprocessing.queues
import Queue
import sys
import threading
import time
import numpy
from smqtk.utils import SmqtkObject
__author__ = '[email protected]'
__all__ = [
'elements_to_matrix',
]
def elements_to_matrix(descr_elements, mat=None, procs=None, buffer_factor=2,
report_interval=None, use_multiprocessing=False,
thread_q_put_interval=0.001):
"""
Add to or create a numpy matrix, adding to it the vector data contained in
a sequence of DescriptorElement instances using asynchronous processing.
If ``mat`` is provided, its shape must equal:
( len(descr_elements) , descr_elements[0].size )
:param descr_elements: Sequence of DescriptorElement objects to transform
into a matrix. Each element should contain descriptor vectors of the
same size.
:type descr_elements:
collections.Sequence[smqtk.representation.DescriptorElement] |
collections.Iterable[smqtk.representation.DescriptorElement]
:param mat: Optionally a pre-constructed numpy matrix of the shape
``(nDescriptors, nFeatures)`` to load descriptor vectors into. We will
only iterate ``nDescriptors`` into the given ``descr_elements``
iterable. If there are more rows in the given matrix than there are
DescriptorElements in ``descr_elements``, then not all rows in the
given matrix will be set. Elements yielded by ``descr_elements`` must
be of the same dimensionality as this given matrix (``nFeatures``)
otherwise an exception will be raised (``ValueError``, by numpy).
If this is not supplied, we create a new matrix to insert vectors into
based on the number of input descriptor elements. This mode required
that the input elements are in a container that defines __len__
:type mat: None | numpy.core.multiarray.ndarray
:param procs: Optional specification of the number of threads/cores to use.
If None, we will attempt to use all available threads/cores.
:type procs: None | int | long
:param buffer_factor: Multiplier against the number of processes used to
limit the growth size of the result queue coming from worker processes.
:type buffer_factor: float
:param report_interval: Optional interval in seconds for debug logging to
occur reporting about conversion speed. This should be greater than 0
if this debug logging is desired.
:type report_interval: None | float
:param use_multiprocessing: Whether or not to use discrete processes as the
parallelization agent vs python threads.
:type use_multiprocessing: bool
:param thread_q_put_interval: Interval at worker threads attempt to insert
values into the output queue after fetching vector from a
DescriptorElement. This is for dead-lock protection due to size-limited
output queue. This is only used if ``use_multiprocessing`` is ``False``
and this must be >0.
:type thread_q_put_interval: float
:return: Created or input matrix.
:rtype: numpy.core.multiarray.ndarray
"""
log = logging.getLogger(__name__)
# Create/check matrix
if mat is None:
sample = descr_elements.__iter__().next()
sample_v = sample.vector()
shp = (len(descr_elements),
sample_v.size)
log.debug("Creating new matrix with shape: %s", shp)
mat = numpy.ndarray(shp, sample_v.dtype)
if procs is None:
procs = multiprocessing.cpu_count()
# Choose parallel types
worker_kwds = {}
if use_multiprocessing:
queue_t = multiprocessing.Queue
worker_t = _ElemVectorExtractorProcess
else:
queue_t = Queue.Queue
worker_t = _ElemVectorExtractorThread
assert thread_q_put_interval >= 0, \
"Thread queue.put interval must be >= 0. (given: %f)" \
% thread_q_put_interval
worker_kwds['q_put_interval'] = thread_q_put_interval
in_q = queue_t()
out_q = queue_t(int(procs * buffer_factor))
# Workers for async extraction
log.debug("constructing worker processes")
workers = [worker_t(i, in_q, out_q, **worker_kwds) for i in range(procs)]
in_queue_t = _FeedQueueThread(descr_elements, in_q, mat, len(workers))
try:
# Start worker processes
log.debug("starting worker processes")
for w in workers:
w.daemon = True
w.start()
log.debug("Sending work packets")
in_queue_t.daemon = True
in_queue_t.start()
# Collect work from async
log.debug("Aggregating async results")
terminals_collected = 0
f = 0
lt = t = time.time()
while terminals_collected < len(workers):
packet = out_q.get()
if packet is None:
terminals_collected += 1
elif isinstance(packet, Exception):
raise packet
else:
r, v = packet
mat[r] = v
f += 1
if report_interval and time.time() - lt >= report_interval:
log.debug("Rows per second: %f, Total: %d",
f / (time.time() - t), f)
lt = time.time()
# All work should be exhausted at this point
if use_multiprocessing and sys.platform == 'darwin':
# multiprocessing.Queue.qsize doesn't work on OSX
# Try to get something from each queue, expecting an empty exception
try:
in_q.get(block=False)
except multiprocessing.queues.Empty:
pass
else:
raise AssertionError("In queue not empty")
try:
out_q.get(block=False)
except multiprocessing.queues.Empty:
pass
else:
raise AssertionError("Out queue not empty")
else:
assert in_q.qsize() == 0, "In queue not empty"
assert out_q.qsize() == 0, "Out queue not empty"
return mat
finally:
log.debug("Stopping/Joining queue feeder thread")
in_queue_t.stop()
in_queue_t.join()
if use_multiprocessing:
# Forcibly terminate worker processes if still alive
log.debug("Joining/Terminating process workers")
for w in workers:
if w.is_alive():
w.terminate()
w.join()
log.debug("Cleaning multiprocess queues")
for q in (in_q, out_q):
q.close()
q.join_thread()
else:
log.debug("Stopping/Joining threaded workers")
for w in workers:
w.stop()
# w.join()
# Threads should exit fine from here
log.debug("Done")
class _FeedQueueThread (SmqtkObject, threading.Thread):
def __init__(self, descr_elements, q, out_mat, num_terminal_packets):
super(_FeedQueueThread, self).__init__()
self.num_terminal_packets = num_terminal_packets
self.out_mat = out_mat
self.q = q
self.descr_elements = descr_elements
self._stop = threading.Event()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def run(self):
try:
# Special case for in-memory storage of descriptors
from smqtk.representation.descriptor_element.local_elements \
import DescriptorMemoryElement
for r, d in enumerate(self.descr_elements):
# If we've run out of matrix to fill,
if r >= self.out_mat.shape[0]:
break
if isinstance(d, DescriptorMemoryElement):
self.out_mat[r] = d.vector()
else:
self.q.put((r, d))
# If we're told to stop, immediately quit out of processing
if self.stopped():
break
except KeyboardInterrupt:
pass
except Exception as ex:
self._log.error("Feeder thread encountered an exception: %s",
str(ex))
self.q.put(ex)
finally:
self._log.debug("Sending in-queue terminal packets")
for _ in range(self.num_terminal_packets):
self.q.put(None)
self._log.debug("Closing in-queue")
class _ElemVectorExtractorProcess (SmqtkObject, multiprocessing.Process):
"""
Helper process for extracting DescriptorElement vectors on a separate
process. This terminates with a None packet fed to in_q. Otherwise, in_q
values are expected to be (row, element) pairs. Tuples of the form
(row, vector) are published to the out_q.
Terminal value: None
"""
def __init__(self, i, in_q, out_q):
super(_ElemVectorExtractorProcess, self)\
.__init__(name='[w%d]' % i)
self._log.debug("Making process worker (%d, %s, %s)", i, in_q, out_q)
self.i = i
self.in_q = in_q
self.out_q = out_q
def run(self):
try:
packet = self.in_q.get()
while packet is not None:
if isinstance(packet, Exception):
self.out_q.put(packet)
else:
row, elem = packet
v = elem.vector()
self.out_q.put((row, v))
packet = self.in_q.get()
self.out_q.put(None)
except KeyboardInterrupt:
pass
except Exception as ex:
self._log.error("%s%s encountered an exception: %s",
self.__class__.__name__, self.name,
str(ex))
self.out_q.put(ex)
class _ElemVectorExtractorThread (SmqtkObject, threading.Thread):
"""
Helper process for extracting DescriptorElement vectors on a separate
process. This terminates with a None packet fed to in_q. Otherwise, in_q
values are expected to be (row, element) pairs. Tuples of the form
(row, vector) are published to the out_q.
Terminal value: None
"""
def __init__(self, i, in_q, out_q, q_put_interval=0.001):
SmqtkObject.__init__(self)
threading.Thread.__init__(self, name='[w%d]' % i)
self._log.debug("Making thread worker (%d, %s, %s)", i, in_q, out_q)
self.i = i
self.in_q = in_q
self.out_q = out_q
self.q_put_interval = q_put_interval
self._stop = threading.Event()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def run(self):
try:
packet = self.in_q.get()
while packet is not None and not self.stopped():
if isinstance(packet, Exception):
self.out_q.put(packet)
else:
row, elem = packet
v = elem.vector()
self.q_put((row, v))
packet = self.in_q.get()
self.q_put(None)
except KeyboardInterrupt:
pass
except Exception as ex:
self._log.error("%s%s encountered an exception: %s",
self.__class__.__name__, self.name,
str(ex))
self.out_q.put(ex)
def q_put(self, val):
"""
Try to put the given value into the output queue until it is inserted
(if it was previously full), or the stop signal was given.
"""
put = False
while not put and not self.stopped():
try:
self.out_q.put(val, timeout=self.q_put_interval)
put = True
except Queue.Full:
# self._log.debug("Skipping q.put Full error")
pass
| [
"[email protected]"
] | |
0540f3bb95f151504c0503e6e8179fec0de7207e | f98f4aaeca3ac841905e0cd8547bbf41944fe690 | /编程语言/Python/Python编程从入门到实践/第一部分_基础知识/第10章_文件和异常/10_6.py | 3835fd1a20ce17bffe0d169f134703a891d57347 | [] | no_license | zhb339/book-learning | 64f433b1ee1f66f3120828352df3b533be4cf9de | 5273fc8d11b2d602484dbe95e55f1e931858382f | refs/heads/master | 2020-03-29T10:48:22.771146 | 2018-11-07T13:46:53 | 2018-11-07T13:46:53 | 149,823,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | while True:
try:
num1 = int(input("Enter first num: "))
num2 = int(input("Enter second num: "))
total = num1 + num2
print("The sum of two numbers is " + str(total) + ".")
except:
print("Please enter number!")
| [
"[email protected]"
] | |
3b5578ebf36b5c8a298cfc1216db7a3e9a6c4497 | 76d9bf90f046979866f88811a1a182e6c1ae9bc9 | /allennlp_semparse/common/sql/text2sql_utils.py | eeabed9539f122af9a279b998b62ff39ef3d953e | [
"Apache-2.0"
] | permissive | Tmr/allennlp-semparse | aba92040db36fe85d968c8fbffe21a24a0057a96 | 751c25f5f59c4d7973f03dc05210f9f94752f1b5 | refs/heads/master | 2023-04-28T15:08:11.653793 | 2022-04-07T18:01:33 | 2022-04-07T18:01:33 | 230,105,752 | 0 | 0 | Apache-2.0 | 2019-12-25T13:18:39 | 2019-12-25T13:18:38 | null | UTF-8 | Python | false | false | 10,377 | py | """
Utility functions for reading the standardised text2sql datasets presented in
`"Improving Text to SQL Evaluation Methodology" <https://arxiv.org/abs/1806.09029>`_
"""
from typing import List, Dict, NamedTuple, Iterable, Tuple, Set
from collections import defaultdict
from allennlp.common import JsonDict
class SqlData(NamedTuple):
"""
A utility class for reading in text2sql data.
Parameters
----------
text : ``List[str]``
The tokens in the text of the query.
text_with_variables : ``List[str]``
The tokens in the text of the query with variables
mapped to table names/abstract variables.
variable_tags : ``List[str]``
Labels for each word in ``text`` which correspond to
which variable in the sql the token is linked to. "O"
is used to denote no tag.
sql : ``List[str]``
The tokens in the SQL query which corresponds to the text.
text_variables : ``Dict[str, str]``
A dictionary of variables associated with the text, e.g. {"city_name0": "san fransisco"}
sql_variables : ``Dict[str, Dict[str, str]]``
A dictionary of variables and column references associated with the sql query.
"""
text: List[str]
text_with_variables: List[str]
variable_tags: List[str]
sql: List[str]
text_variables: Dict[str, str]
sql_variables: Dict[str, Dict[str, str]]
class TableColumn(NamedTuple):
name: str
column_type: str
is_primary_key: bool
def column_has_string_type(column: TableColumn) -> bool:
if "varchar" in column.column_type:
return True
elif column.column_type == "text":
return True
elif column.column_type == "longtext":
return True
return False
def column_has_numeric_type(column: TableColumn) -> bool:
if "int" in column.column_type:
return True
elif "float" in column.column_type:
return True
elif "double" in column.column_type:
return True
return False
def replace_variables(
sentence: List[str], sentence_variables: Dict[str, str]
) -> Tuple[List[str], List[str]]:
"""
Replaces abstract variables in text with their concrete counterparts.
"""
tokens = []
tags = []
for token in sentence:
if token not in sentence_variables:
tokens.append(token)
tags.append("O")
else:
for word in sentence_variables[token].split():
tokens.append(word)
tags.append(token)
return tokens, tags
def split_table_and_column_names(table: str) -> Iterable[str]:
partitioned = [x for x in table.partition(".") if x != ""]
# Avoid splitting decimal strings.
if partitioned[0].isnumeric() and partitioned[-1].isnumeric():
return [table]
return partitioned
def clean_and_split_sql(sql: str) -> List[str]:
"""
Cleans up and unifies a SQL query. This involves unifying quoted strings
and splitting brackets which aren't formatted consistently in the data.
"""
sql_tokens: List[str] = []
for token in sql.strip().split():
token = token.replace('"', "'").replace("%", "")
if token.endswith("(") and len(token) > 1:
sql_tokens.extend(split_table_and_column_names(token[:-1]))
sql_tokens.extend(split_table_and_column_names(token[-1]))
else:
sql_tokens.extend(split_table_and_column_names(token))
return sql_tokens
def resolve_primary_keys_in_schema(
sql_tokens: List[str], schema: Dict[str, List[TableColumn]]
) -> List[str]:
"""
Some examples in the text2sql datasets use ID as a column reference to the
column of a table which has a primary key. This causes problems if you are trying
to constrain a grammar to only produce the column names directly, because you don't
know what ID refers to. So instead of dealing with that, we just replace it.
"""
primary_keys_for_tables = {
name: max(columns, key=lambda x: x.is_primary_key).name for name, columns in schema.items()
}
resolved_tokens = []
for i, token in enumerate(sql_tokens):
if i > 2:
table_name = sql_tokens[i - 2]
if token == "ID" and table_name in primary_keys_for_tables.keys():
token = primary_keys_for_tables[table_name]
resolved_tokens.append(token)
return resolved_tokens
def clean_unneeded_aliases(sql_tokens: List[str]) -> List[str]:
unneeded_aliases = {}
previous_token = sql_tokens[0]
for (token, next_token) in zip(sql_tokens[1:-1], sql_tokens[2:]):
if token == "AS" and previous_token is not None:
# Check to see if the table name without the alias
# is the same.
table_name = next_token[:-6]
if table_name == previous_token:
# If so, store the mapping as a replacement.
unneeded_aliases[next_token] = previous_token
previous_token = token
dealiased_tokens: List[str] = []
for token in sql_tokens:
new_token = unneeded_aliases.get(token, None)
if new_token is not None and dealiased_tokens[-1] == "AS":
dealiased_tokens.pop()
continue
elif new_token is None:
new_token = token
dealiased_tokens.append(new_token)
return dealiased_tokens
def read_dataset_schema(schema_path: str) -> Dict[str, List[TableColumn]]:
"""
Reads a schema from the text2sql data, returning a dictionary
mapping table names to their columns and respective types.
This handles columns in an arbitrary order and also allows
either ``{Table, Field}`` or ``{Table, Field} Name`` as headers,
because both appear in the data. It also uppercases table and
column names if they are not already uppercase.
Parameters
----------
schema_path : ``str``, required.
The path to the csv schema.
Returns
-------
A dictionary mapping table names to typed columns.
"""
schema: Dict[str, List[TableColumn]] = defaultdict(list)
for i, line in enumerate(open(schema_path, "r")):
if i == 0:
header = [x.strip() for x in line.split(",")]
elif line[0] == "-":
continue
else:
data = {key: value for key, value in zip(header, [x.strip() for x in line.split(",")])}
table = data.get("Table Name", None) or data.get("Table")
column = data.get("Field Name", None) or data.get("Field")
is_primary_key = data.get("Primary Key") == "y"
schema[table.upper()].append(TableColumn(column.upper(), data["Type"], is_primary_key))
return {**schema}
def process_sql_data(
data: List[JsonDict],
use_all_sql: bool = False,
use_all_queries: bool = False,
remove_unneeded_aliases: bool = False,
schema: Dict[str, List[TableColumn]] = None,
) -> Iterable[SqlData]:
"""
A utility function for reading in text2sql data. The blob is
the result of loading the json from a file produced by the script
``scripts/reformat_text2sql_data.py``.
Parameters
----------
data : ``JsonDict``
use_all_sql : ``bool``, optional (default = False)
Whether to use all of the sql queries which have identical semantics,
or whether to just use the first one.
use_all_queries : ``bool``, (default = False)
Whether or not to enforce query sentence uniqueness. If false,
duplicated queries will occur in the dataset as separate instances,
as for a given SQL query, not only are there multiple queries with
the same template, but there are also duplicate queries.
remove_unneeded_aliases : ``bool``, (default = False)
The text2sql data by default creates alias names for `all` tables,
regardless of whether the table is derived or if it is identical to
the original (e.g SELECT TABLEalias0.COLUMN FROM TABLE AS TABLEalias0).
This is not necessary and makes the action sequence and grammar manipulation
much harder in a grammar based decoder. Note that this does not
remove aliases which are legitimately required, such as when a new
table is formed by performing operations on the original table.
schema : ``Dict[str, List[TableColumn]]``, optional, (default = None)
A schema to resolve primary keys against. Converts 'ID' column names
to their actual name with respect to the Primary Key for the table
in the schema.
"""
for example in data:
seen_sentences: Set[str] = set()
for sent_info in example["sentences"]:
# Loop over the different sql statements with "equivalent" semantics
for sql in example["sql"]:
text_with_variables = sent_info["text"].strip().split()
text_vars = sent_info["variables"]
query_tokens, tags = replace_variables(text_with_variables, text_vars)
if not use_all_queries:
key = " ".join(query_tokens)
if key in seen_sentences:
continue
else:
seen_sentences.add(key)
sql_tokens = clean_and_split_sql(sql)
if remove_unneeded_aliases:
sql_tokens = clean_unneeded_aliases(sql_tokens)
if schema is not None:
sql_tokens = resolve_primary_keys_in_schema(sql_tokens, schema)
sql_variables = {}
for variable in example["variables"]:
sql_variables[variable["name"]] = {
"text": variable["example"],
"type": variable["type"],
}
sql_data = SqlData(
text=query_tokens,
text_with_variables=text_with_variables,
variable_tags=tags,
sql=sql_tokens,
text_variables=text_vars,
sql_variables=sql_variables,
)
yield sql_data
# Some questions might have multiple equivalent SQL statements.
# By default, we just use the first one. TODO(Mark): Use the shortest?
if not use_all_sql:
break
| [
"[email protected]"
] | |
2beb8e21a0723043fb3bf99c43435fcea367c31d | 1be37064cf303b79cf83ab1d5d120a8db6dbeaa4 | /fastparquet/thrift_structures.py | 280b8583627af5b1b4c31b246d18bc52192c32fd | [
"Apache-2.0"
] | permissive | mrocklin/fastparquet | 02e0252609b8825b2ad89c0b21b3a6e20bc34f94 | b0171ba3a7894f4860c1ce761ad47f451fc4cbc8 | refs/heads/master | 2020-12-24T08:54:09.977491 | 2016-11-08T17:14:38 | 2016-11-08T17:14:38 | 73,311,617 | 2 | 0 | null | 2016-11-09T18:46:52 | 2016-11-09T18:46:51 | null | UTF-8 | Python | false | false | 207 | py | import os
import thriftpy
THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
parquet_thrift = thriftpy.load(THRIFT_FILE, module_name="parquet_thrift") # pylint: disable=invalid-name
| [
"[email protected]"
] | |
e763f92c41f17a6f2fdbc603b1a4ce8c2d338382 | 9a5438bdb8e84d0167ddea5458a7f729fdd54121 | /metadata/tests/test_models/test_meta/test_attribute.py | 5d8ef2b6856c0df33ffb8ea2141cb61013e7a8aa | [] | no_license | Grusinator/MetaDataApi | 740fd2be4cb97b670f827a071a0ac8c50f79f8ff | 081f881c735466ed1dbbd68646b821299c5168f8 | refs/heads/master | 2023-07-25T23:58:22.179717 | 2020-03-15T09:36:05 | 2020-03-15T09:36:05 | 149,087,967 | 5 | 1 | null | 2023-07-25T15:39:12 | 2018-09-17T07:45:09 | CSS | UTF-8 | Python | false | false | 1,856 | py | import unittest
from datetime import datetime
import django
from django.test import TransactionTestCase
class TestAttribute(TransactionTestCase):
@classmethod
def setUpClass(cls):
super(TestAttribute, cls).setUpClass()
django.setup()
@unittest.skip
def test_datatype_to_data_object(self):
self.fail()
@unittest.skip
def test_exists_by_label(self):
self.fail()
@unittest.skip
def test_exists(self):
self.fail()
@unittest.skip
def test_assert_data_type(self):
self.fail()
def test_all_instances(self):
from metadata.tests import LoadTestData
schema = LoadTestData.init_foaf()
from metadata.models import SchemaNode, SchemaAttribute
obj = SchemaNode(label="test", schema=schema)
obj.save()
from metadata.models import BaseAttribute, Node
from metadata.models import FileAttribute
from metadata.models import ImageAttribute
for InstanceType in set(BaseAttribute.get_all_instance_types()) - {FileAttribute,
ImageAttribute}:
data_type = InstanceType.get_data_type()
att = SchemaAttribute(
label="test_%s" % str(data_type),
data_type=data_type,
object=obj,
)
att.save()
obj_inst = Node(base=obj)
obj_inst.save()
value = data_type(2011, 4, 3) if data_type is datetime else data_type()
att_inst = InstanceType(
value=value,
base=att,
object=obj_inst
)
att_inst.save()
instances = BaseAttribute.get_all_instances_from_base(att)
self.assertListEqual([att_inst], instances)
| [
"[email protected]"
] | |
be4958352ebe1cd5a7ed7e0e5a5a17eb690f7a54 | 9e26e9b8e1e0f7bbf116fdf5c92b4e3006d385ef | /user/views.py | a70640e853d683e97575775ec1e3f0e5b67f3000 | [] | no_license | talhajubair100/e-commerce | fd77877b877a91a447e9b17ab58a4d59cc1456df | e7620b0971489ce558a97dd8d9988562663fe560 | refs/heads/main | 2023-02-15T08:57:29.308811 | 2021-01-06T18:25:35 | 2021-01-06T18:25:35 | 317,015,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,539 | py | from django.contrib.auth.forms import PasswordChangeForm
from .forms import UserUpdateForm, ProfileUpdateForm
from django.shortcuts import redirect, render
from django.http.response import HttpResponse, HttpResponseRedirect
from product.models import Category, Comment
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib import messages
from .models import UserProfile
from order.models import Order, OrderProduct
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def user_profile(request):
category = Category.objects.all()
current_user = request.user
profile = UserProfile.objects.get(user_id=current_user.id)
context = {'category': category, 'profile': profile}
return render(request, 'user_profile.html', context)
@login_required
def user_update(request):
if request.method == 'POST':
user_form = UserUpdateForm(request.POST, instance=request.user)
profile_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.userprofile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, 'Your account has been update')
return redirect('/user')
else:
category = Category.objects.all()
user_form = UserUpdateForm(instance=request.user)
profile_form = ProfileUpdateForm(instance=request.user.userprofile)
context = {'user_form': user_form, 'profile_form': profile_form, 'category': category}
return render(request, 'user_update.html', context)
@login_required
def user_password(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) #important
messages.success(request, "Your password was successfully update !")
return HttpResponseRedirect('/user')
else:
messages.error(request, 'Please correct the error.<br>' + str(form.errors))
return HttpResponseRedirect('/user/password')
else:
category = Category.objects.all()
form = PasswordChangeForm(request.user)
context = {'category': category, 'form': form}
return render(request, 'user_password.html', context)
@login_required
def user_orders(request):
category = Category.objects.all()
current_user = request.user
orders = Order.objects.filter(user_id=current_user.id).order_by('-create_at')
context = {'category': category, 'orders': orders}
return render(request, 'user_orders.html', context)
@login_required
def order_details(request, id):
category = Category.objects.all()
current_user = request.user
orders = Order.objects.get(user_id=current_user.id, id=id)
orderitems = OrderProduct.objects.filter(order_id=id)
context = {'category': category, 'orders': orders, 'orderitems': orderitems}
return render(request, 'user_order_detail.html', context)
@login_required
def user_orders_product(request):
category = Category.objects.all()
current_user = request.user
order_product = OrderProduct.objects.filter(user_id=current_user.id).order_by('-id')
context = {'category': category, 'order_product': order_product}
return render(request, 'user_orders_product.html', context)
@login_required
def user_order_product_details(request, id, oid):
category = Category.objects.all()
current_user = request.user
orders = Order.objects.get(user_id=current_user.id, id=oid)
orderitems = OrderProduct.objects.filter(id=id, user_id=current_user.id)
context = {'category': category, 'orderitems': orderitems, 'orders': orders}
return render(request, 'user_order_detail.html', context)
@login_required
def user_comments(request):
category = Category.objects.all()
current_user = request.user
comments = Comment.objects.filter(user_id=current_user.id)
context = {'category': category, 'comments': comments}
return render(request, 'user_comments.html', context)
@login_required
def user_delete_comment(request, id):
current_user = request.user
Comment.objects.filter(id=id ,user_id=current_user.id).delete()
messages.success(request, "Secessfully delete comment....")
return HttpResponseRedirect('user/comments/')
# def login_view(request):
# if request.method == 'POST':
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(request, username=username, password=password)
# if user is not None:
# login(request, user)
# current_user = request.user
# userprofile = UserProfile.objects.get(user_id=current_user.id)
# request.session['userimage'] = userprofile.image.url
# return HttpResponseRedirect("/")
# else:
# messages.warning(request, "Login Error !! Username or Password is incorrect")
# return HttpResponseRedirect("/login")
# category = Category.objects.all()
# context = {'category': category}
# return render (request, 'login.html', context)
def logout_view(request):
logout(request)
return HttpResponseRedirect("/")
# def signup_view(request):
# if request.method == "POST":
# form = SignUpForm(request.POST)
# if form.is_valid():
# form.save() #signup complete here
# # this code for auto login
# username = form.cleaned_data.get('username')
# password = form.cleaned_data.get('password1')
# user = authenticate(username=username, password=password)
# login(request, user)
# # Create data in profile table for user
# current_user = request.user
# data=UserProfile()
# data.user_id=current_user.id
# data.image="media/users/user.jpg"
# data.save()
# messages.success(request, 'Your account has been created!')
# return HttpResponseRedirect("/login/")
# else:
# messages.warning(request,form.errors)
# return HttpResponseRedirect('/signup')
# form = SignUpForm()
# category = Category.objects.all()
# context = {'category': category, 'form': form}
# return render (request, 'signup.html', context)
| [
"[email protected]"
] | |
8a809d0d0ffedb0a224620d12bc9ab3749e4ff9e | 5f7c40860e584cb86e140d0819397a75f93e28ea | /motorcycles.py | 3f2b9124a2a2adecfbcef98ba88d8e0607f78df7 | [] | no_license | bailijiang/Python_exercise | cb149db1bc770fc684bf461ebae1b6dd53bf10a5 | 744a2a3b44036495ae28ec87fea98c04d008b099 | refs/heads/master | 2021-09-02T03:30:21.512752 | 2017-12-29T23:39:05 | 2017-12-29T23:39:05 | 114,254,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | __author__ = 'Bryan'
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
motorcycles[0] = 'ducati'
print(motorcycles)
motorcycles.append('ducati')
print(motorcycles)
motorcycles.insert(1, 'BRZ')
print(motorcycles)
del motorcycles[0]
print(motorcycles)
poped_motocycles = motorcycles.pop()
print(poped_motocycles)
first_owned = motorcycles.pop(0)
print(first_owned.title())
motorcycles.remove('suzuki')
print(motorcycles) | [
"[email protected]"
] | |
fe5700984481c97ee479bdc3980dd0df676e42cf | 9658ec133fd777d349487d7f06df74e4c99f5613 | /src/microplot/explorer/examples/demo_mono_bitmap.py | 1606e08cf9d539ddfa43668d0769153d2c67744e | [
"MIT"
] | permissive | Palmbear/microplot | 6a6d0fc0be7318d95974c8541b2db2cf4390b6e8 | 9cb07c1f1a231b81f4517417c8a3a0fff6aeb3b1 | refs/heads/master | 2023-07-01T22:59:50.577556 | 2021-03-19T15:42:14 | 2021-03-19T15:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | import math
from plotter import Plotter
from plots import LinePlot
def run():
sines = list(math.sin(math.radians(x))
for x in range(0, 361, 5))
plot = LinePlot([sines],'MicroPlot line')
plotter = Plotter()
plot.plot(plotter)
plotter.write_mono_bitmap('demo-mono.bmp')
| [
"[email protected]"
] | |
660d07bde49e51d20f8d3401e32430d4edd6b1ee | 0f36de8254443fd6bf0ae5fe0c1dc1a3bda619d2 | /devel/.private/differential_robot_185104iaib/lib/python2.7/dist-packages/differential_robot_185104iaib/msg/_counter_message.py | 34df98f8e6c0f454f8f33569e3add0f1ab2278c6 | [] | no_license | alekal1/ros | 88c5d2a1b013e36b4acecfc5f131ce0113088bc6 | e17174b023f60aab8982195a6427e569b01ab9d0 | refs/heads/master | 2023-01-24T03:39:57.639016 | 2020-11-21T20:01:26 | 2020-11-21T20:01:26 | 296,635,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from differential_robot_185104iaib/counter_message.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class counter_message(genpy.Message):
_md5sum = "9acad0024d496a45d7194e5310734a3c"
_type = "differential_robot_185104iaib/counter_message"
_has_header = False # flag to mark the presence of a Header object
_full_text = """
int32 count_left
int32 count_right
"""
__slots__ = ['count_left','count_right']
_slot_types = ['int32','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
count_left,count_right
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(counter_message, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.count_left is None:
self.count_left = 0
if self.count_right is None:
self.count_right = 0
else:
self.count_left = 0
self.count_right = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.count_left, _x.count_right))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 8
(_x.count_left, _x.count_right,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.count_left, _x.count_right))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 8
(_x.count_left, _x.count_right,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
| [
"[email protected]"
] | |
516036f8a0bc3f47d5d771b46101d15d00bbf14f | 22295cda10cf11472fee987093e0b245f6f96ef3 | /nick/twophoton/imag003_depth_report_signals.py | 40243b4470c47a81c063c4888295fcf8c03f4476 | [] | no_license | sjara/jaratest | aecb9e3bcc1ff91db35e7cd551c0f4f3da0b690a | 09bf2c76bd5bf45191a2c37c14171ae1e8902c4b | refs/heads/master | 2023-08-11T09:55:17.684814 | 2023-08-03T22:03:31 | 2023-08-03T22:03:31 | 63,100,718 | 2 | 5 | null | 2023-04-11T18:14:08 | 2016-07-11T20:43:04 | Python | UTF-8 | Python | false | false | 1,259 | py | import os
import numpy as np
from scipy import io
from jaratoolbox import loadbehavior
from jaratoolbox import extraplots
from matplotlib import pyplot as plt
from skimage.external import tifffile
dataDir = '/home/nick/data/2pdata/imag003/'
# sessionsToPlot = [0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 14, 15, 17, 18, 19]
sessionsToPlot = [8]
# session = '002_019'
for ses in sessionsToPlot:
Fn = 'imag003_002_{0:03d}_rigid.signals.mat'.format(ses)
#Read the file with the extracted signals
sigMat = os.path.join(dataDir, Fn)
sigData = io.loadmat(sigMat)
#Get number of frames and extracted ROIs
signals = sigData['sig']
nFrames, nROIs = np.shape(signals)
minSig = np.min(signals.ravel())
maxSig = np.max(signals.ravel())
sdSig = np.std(signals.ravel())
timebase = np.arange(nFrames)
plt.clf()
for indROI in range(nROIs):
yOffset = (4*sdSig) * indROI
plt.plot(timebase, signals[:,indROI]+yOffset, 'k')
plt.title('imag003_002_{0:03d}'.format(ses))
plt.xlabel('Frame')
extraplots.boxoff(plt.gca(), yaxis=False)
plt.gca().set_yticks([])
# plt.show()
plt.tight_layout()
plt.show()
# plt.savefig('/home/nick/data/2pdata/depthReportFigs/{0:03d}.png'.format(ses))
| [
"[email protected]"
] | |
269bf66d37d33754342bddb0248ac1f47af381c5 | 0246c0ded95e8cfb1139159c267f6c024ca83f7a | /5kyu_rgb_to_hex_conversion.py | 1296ce04e6bd1bdea4d07c74e38cbcc1fec3c027 | [] | no_license | cromox1/KodPerang_kata | e40a4f2c3f4eba029caabc3312236aac1bea43bd | 242effc71fef40c0c9a7dc46653845a20da5f239 | refs/heads/main | 2023-01-21T00:18:00.232300 | 2020-11-26T12:13:17 | 2020-11-26T12:13:17 | 312,572,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | def rgb(r, g, b):
int_to_hex = lambda number: "%0.2X" % number if (0 <= number <= 255) else '00' if number < 0 else 'FF'
return int_to_hex(r) + int_to_hex(g) + int_to_hex(b)
# def int_to_hex(num):
# if num <= 0:
# num = 0
# elif num >= 255:
# num = 255
# return "%0.2X" % num
class Test:
def assert_equals(value, expected):
from nose.tools import assert_equal
try:
assert_equal(value, expected)
print('EQUAL --> v =', value, " == x =", expected)
except:
message = ' // # ' + str(value) + ' should == ' + str(expected)
print('UNEQUAL!! --> v =', value, " != x =", expected, message)
@classmethod
def describe(cls, param):
print(param)
# Test.assert_equals(rgb(0,0,0),"000000", "testing zero values")
# Test.assert_equals(rgb(1,2,3),"010203", "testing near zero values")
# Test.assert_equals(rgb(255,255,255), "FFFFFF", "testing max values")
# Test.assert_equals(rgb(254,253,252), "FEFDFC", "testing near max values")
# Test.assert_equals(rgb(-20,275,125), "00FF7D", "testing out of range values")
Test.assert_equals(rgb(0,0,0),"000000")
Test.assert_equals(rgb(1,2,3),"010203")
Test.assert_equals(rgb(255,255,255), "FFFFFF")
Test.assert_equals(rgb(254,253,252), "FEFDFC")
Test.assert_equals(rgb(-20,275,125), "00FF7D")
Test.assert_equals(rgb(148, 0, 211), "9400D3")
Test.assert_equals(rgb(254,253,300), "FEFDFF")
| [
"[email protected]"
] | |
38236755eccebc4e6beb4ad2d215acf36bb72eaa | d7386e4c2e9a1c9c8543239702c9f36f98ec7431 | /Scrapy/test01/test01/settings.py | c6bc7a4f85f533c2b18cb940074b15d02a2998b5 | [] | no_license | SalingerMa/Spider | 6f36fee328d592fa56625c49b40df60074acb227 | de1137585dcd44a47cfd10f8aabec53663f7cba1 | refs/heads/master | 2021-07-11T11:47:07.960418 | 2021-06-23T16:03:42 | 2021-06-23T16:03:42 | 163,784,688 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,072 | py | # -*- coding: utf-8 -*-
# Scrapy settings for test01 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'test01'
SPIDER_MODULES = ['test01.spiders']
NEWSPIDER_MODULE = 'test01.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'test01 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'test01.middlewares.Test01SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'test01.middlewares.Test01DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'test01.pipelines.Test01Pipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
d57bb865836da6b50b08d1e0795f50aef737cded | 74f0c966d09786f447ad60bf837ea342cb405874 | /neutron/db/sqlalchemyutils.py | b720554d8bfe626a674b13cc47b1c6054d97fa9f | [
"Apache-2.0"
] | permissive | mrwukang/neutron | ad354d19a6ba4ec9a92b4e54d02cf1bbfd66e47e | ebdb2ad1213eaf09c6a3f061a94ff4453c3e7506 | refs/heads/master | 2020-04-11T18:24:09.601969 | 2019-01-02T15:42:05 | 2019-01-02T15:42:05 | 161,997,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,493 | py | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from six import moves
import sqlalchemy
from sqlalchemy.orm import properties
from neutron._i18n import _
from neutron.common import exceptions as n_exc
LOG = logging.getLogger(__name__)
def paginate_query(query, model, limit, sorts, marker_obj=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort key, specified by sorts.
(If sort keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort key, this would be easy: sort_key > X.
With a compound-values sort key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
The reason of didn't use OFFSET clause was it don't scale, please refer
discussion at https://lists.launchpad.net/openstack/msg02547.html
We also have to cope with different sort directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sorts: array of attributes and direction by which results should
be sorted
:param marker: the last item of the previous page; we returns the next
results after this value.
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if not sorts:
return query
# A primary key must be specified in sort keys
assert not (limit and
len(set(dict(sorts).keys()) &
set(model.__table__.primary_key.columns.keys())) == 0)
# Add sorting
for sort_key, sort_direction in sorts:
sort_dir_func = sqlalchemy.asc if sort_direction else sqlalchemy.desc
try:
sort_key_attr = getattr(model, sort_key)
except AttributeError:
# Extension attribute doesn't support for sorting. Because it
# existed in attr_info, it will be caught here
msg = _("%s is invalid attribute for sort_key") % sort_key
raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
if isinstance(sort_key_attr.property, properties.RelationshipProperty):
msg = _("The attribute '%(attr)s' is reference to other "
"resource, can't used by sort "
"'%(resource)s'") % {'attr': sort_key,
'resource': model.__tablename__}
raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker_obj:
marker_values = [getattr(marker_obj, sort[0]) for sort in sorts]
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i, sort in enumerate(sorts):
crit_attrs = [(getattr(model, sorts[j][0]) == marker_values[j])
for j in moves.range(i)]
model_attr = getattr(model, sort[0])
if sort[1]:
crit_attrs.append((model_attr > marker_values[i]))
else:
crit_attrs.append((model_attr < marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit:
query = query.limit(limit)
return query
| [
"[email protected]"
] | |
fa882c3cca9a31dfa9f0468d57cc0eba36f3d1b4 | a9f4434d3b410886ffc10aa5aede3634692152b6 | /0218/ex_if4.py | 7c7e8186913a879087edcca4ecb26393f4434e91 | [] | no_license | parka01/python_ex | d3690dcd8753864c335bf7782553719a072bd01d | a5811487516eb9ef86d5ae93e9060cac267b87ce | refs/heads/main | 2023-03-13T08:35:03.837790 | 2021-02-26T03:40:41 | 2021-02-26T03:40:41 | 339,892,972 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | age=int(input('나이를 입력하세요: '))
if age>=65:
print('지하철 경로 우대 승차권 발급')
else:
print('일반 승차권 발급')
print('자동 발매기를 이용해주셔서 감사합니다.') | [
"[email protected]"
] | |
facce1dd0ae0e979ec432e8a3d4697e93750f24b | 312ab41033c2cb043d617d3e633c166503fd280c | /Informatikk/Bachelor/H2017/ITGK/Eksamensøving/Øving 6/Mynter.py | 473758343370ecab300a3a1f0300f96a8752c55d | [] | no_license | skanin/NTNU | cb9b833d9de0d504965979584370b8f353435cd1 | e4023856f69060f8d3d09ff4990e29f7a75d98b1 | refs/heads/master | 2022-01-30T14:31:22.947512 | 2022-01-20T14:11:14 | 2022-01-20T14:11:14 | 113,476,017 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | def count_coins(coins):
summen = 0
for coin in coins:
summen += coin
return summen
def num_coins(numbers):
coins = []
temp = []
for num in numbers:
if num > 10:
enere = num % 10
tiere = num // 10
if tiere % 2 == 0:
tjue = tiere
tiere = 0
else:
tjue = num // 20
femere = num // 5
temp.append(tjue)
temp.append(tiere)
temp.append(femere)
temp.append(enere)
coins.append(temp)
return coins
print(num_coins([63, 55]))
| [
"[email protected]"
] | |
5816095d02686182c4cc3f9e02f65d91965b2fe6 | e6f1137903b9658e5e3c1ee51201a931894303b9 | /util/melt/layers/__init__.py | 4324748862ecebf1960d5f5dae2b09531e22e1a5 | [] | no_license | fword/hasky | 8ed69ef85bb34823d9ade27bb3b19aac02872440 | d3c680ffa04f7487b931a5575977798157b42b7e | refs/heads/master | 2021-01-23T01:18:49.275631 | 2017-03-18T13:01:27 | 2017-03-18T13:01:27 | 85,898,744 | 1 | 1 | null | 2017-03-23T02:39:06 | 2017-03-23T02:39:06 | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
# ==============================================================================
# \file __init__.py
# \author chenghuige
# \date 2016-08-16 16:36:38.289129
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from melt.layers.layers import *
from melt.layers.optimizers_backward_compat import *
#TODO
#if int(tf.__version__.split('.')[1]) > 10:
# from melt.layers.optimizers import *
#else:
# from melt.layers.optimizers_backward_compat import *
| [
"[email protected]"
] | |
12773f151707596988078c66d295a28579590999 | 6515dee87efbc5edfbf4c117e262449999fcbb50 | /eet/Distant_Barcodes.py | 9c536919456a6b4b5ac94f5b3db75a3e8443f3df | [] | no_license | wangyunge/algorithmpractice | 24edca77e180854b509954dd0c5d4074e0e9ef31 | 085b8dfa8e12f7c39107bab60110cd3b182f0c13 | refs/heads/master | 2021-12-29T12:55:38.096584 | 2021-12-12T02:53:43 | 2021-12-12T02:53:43 | 62,696,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | """
In a warehouse, there is a row of barcodes, where the ith barcode is barcodes[i].
Rearrange the barcodes so that no two adjacent barcodes are equal. You may return any answer, and it is guaranteed an answer exists.
Example 1:
Input: barcodes = [1,1,1,2,2,2]
Output: [2,1,2,1,2,1]
Example 2:
Input: barcodes = [1,1,1,1,2,2,3,3]
Output: [1,3,1,3,1,2,1,2]
"""
# DFS
class Solution(object):
def rearrangeBarcodes(self, barcodes):
"""
:type barcodes: List[int]
:rtype: List[int]
"""
# count
table = {}
for bar in barcodes:
cnt = table.get(bar, 0)
table[bar] = cnt + 1
def _dfs(path, left):
if len(path) == len(barcodes):
return path
for key, cnt in left.items():
if cnt > 0 and key != path[-1]:
new_table = left
new_table[key] -= 1
_dfs(path+[key], new_table)
res = _dfs(['0'], table)
return res[1:]
# Priority Queue
class Solution(object):
def rearrangeBarcodes(self, barcodes):
"""
:type barcodes: List[int]
:rtype: List[int]
"""
# Task Completion
#Fill the most frequency first with every (len(s) / most_freq)-1 positions
| [
"[email protected]"
] | |
5f6c2cc23bfa6be883b2211c1cbe82dfdc70fee1 | 7b437e095068fb3f615203e24b3af5c212162c0d | /enaml/qt/qt_menu.py | eed0ca7ad1bdec1ef6ddb3227edc9944a2341163 | [
"BSD-3-Clause"
] | permissive | ContinuumIO/enaml | d8200f97946e5139323d22fba32c05231c2b342a | 15c20b035a73187e8e66fa20a43c3a4372d008bd | refs/heads/master | 2023-06-26T16:16:56.291781 | 2013-03-26T21:13:52 | 2013-03-26T21:13:52 | 9,047,832 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,532 | py | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QMenu
from atom.api import Typed
from enaml.widgets.menu import ProxyMenu
from .qt_action import QtAction
from .qt_action_group import QtActionGroup
from .qt_toolkit_object import QtToolkitObject
class QCustomMenu(QMenu):
""" A custom subclass of QMenu which adds some convenience apis.
"""
def __init__(self, *args, **kwargs):
""" Initialize a QCustomMenu.
Parameters
----------
*args, **kwargs
The positional and keyword arguments needed to initialize
a QMenu.
"""
super(QCustomMenu, self).__init__(*args, **kwargs)
self._is_context_menu = False
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _onShowContextMenu(self, pos):
""" A private signal handler for displaying the context menu.
This handler is connected to the context menu requested signal
on the parent widget when this menu is marked as a context
menu.
"""
parent = self.parentWidget()
if parent is not None:
global_pos = parent.mapToGlobal(pos)
self.exec_(global_pos)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def isContextMenu(self):
""" Whether this menu acts as a context menu for its parent.
Returns
-------
result : bool
True if this menu acts as a context menu, False otherwise.
"""
return self._is_context_menu
def setContextMenu(self, context):
""" Set whether this menu acts as a context menu for its parent.
Parameters
----------
context : bool
True if this menu should act as a context menu, False
otherwise.
"""
old_context = self._is_context_menu
self._is_context_menu = context
if old_context != context:
parent = self.parentWidget()
if parent is not None:
handler = self._onShowContextMenu
if context:
parent.setContextMenuPolicy(Qt.CustomContextMenu)
parent.customContextMenuRequested.connect(handler)
else:
parent.setContextMenuPolicy(Qt.DefaultContextMenu)
parent.customContextMenuRequested.disconnect(handler)
def removeActions(self, actions):
""" Remove the given actions from the menu.
Parameters
----------
actions : iterable
An iterable of QActions to remove from the menu.
"""
remove = self.removeAction
for action in actions:
remove(action)
class QtMenu(QtToolkitObject, ProxyMenu):
""" A Qt implementation of an Enaml ProxyMenu.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QCustomMenu)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying menu widget.
"""
self.widget = QCustomMenu(self.parent_widget())
def init_widget(self):
""" Initialize the widget.
"""
super(QtMenu, self).init_widget()
d = self.declaration
self.set_title(d.title)
self.set_enabled(d.enabled)
self.set_visible(d.visible)
self.set_context_menu(d.context_menu)
def init_layout(self):
""" Initialize the layout of the widget.
"""
super(QtMenu, self).init_layout()
widget = self.widget
for child in self.children():
if isinstance(child, QtMenu):
widget.addMenu(child.widget)
elif isinstance(child, QtAction):
widget.addAction(child.widget)
elif isinstance(child, QtActionGroup):
widget.addActions(child.actions())
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def find_next_action(self, child):
""" Get the QAction instance which follows the child.
Parameters
----------
child : QtToolkitObject
The child of interest.
Returns
-------
result : QAction or None
The QAction which comes immediately after the actions of the
given child, or None if no actions follow the child.
"""
found = False
for dchild in self.children():
if found:
if isinstance(dchild, QtMenu):
return dchild.widget.menuAction()
if isinstance(dchild, QtAction):
return dchild.widget
if isinstance(dchild, QtActionGroup):
acts = dchild.actions()
if len(acts) > 0:
return acts[0]
else:
found = dchild is child
def child_added(self, child):
""" Handle the child added event for a QtMenu.
"""
super(QtMenu, self).child_added(child)
if isinstance(child, QtMenu):
before = self.find_next_action(child)
self.widget.insertMenu(before, child.widget)
elif isinstance(child, QtAction):
before = self.find_next_action(child)
self.widget.insertAction(before, child.widget)
elif isinstance(child, QtActionGroup):
before = self.find_next_action(child)
self.widget.insertActions(before, child.actions())
def child_removed(self, child):
""" Handle the child removed event for a QtMenu.
"""
super(QtMenu, self).child_removed(child)
if isinstance(child, QtMenu):
self.widget.removeAction(child.widget.menuAction())
elif isinstance(child, QtAction):
self.widget.removeAction(child.widget)
elif isinstance(child, QtActionGroup):
self.widget.removeActions(child.actions())
#--------------------------------------------------------------------------
# ProxyMenu API
#--------------------------------------------------------------------------
def set_title(self, title):
""" Set the title of the underlying widget.
"""
self.widget.setTitle(title)
def set_visible(self, visible):
""" Set the visibility on the underlying widget.
"""
self.widget.menuAction().setVisible(visible)
def set_enabled(self, enabled):
""" Set the enabled state of the widget.
"""
self.widget.setEnabled(enabled)
def set_context_menu(self, context):
""" Set whether or not the menu is a context menu.
"""
self.widget.setContextMenu(context)
| [
"[email protected]"
] | |
75c0f023556c19ce152dd2ed15092cba945a4cb9 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_enchant.py | 30a2682435ccb97a29ce664927597299802bbdb4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py |
#calss header
class _ENCHANT():
def __init__(self,):
self.name = "ENCHANT"
self.definitions = [u'to attract or please someone very much: ', u'to have a magical effect on someone or something']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
8b4ecf37197f1bf3a416459742b7d0638c031bea | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-airec/aliyunsdkairec/request/v20181012/ListScenesRequest.py | e28838ca25f5a9c7bf5a3172116db39b3d94cbdb | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,647 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkairec.endpoint import endpoint_data
class ListScenesRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Airec', '2018-10-12', 'ListScenes','airec')
self.set_uri_pattern('/openapi/instances/[InstanceId]/scenes')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_path_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_path_param('InstanceId',InstanceId)
def get_Status(self):
return self.get_query_params().get('Status')
def set_Status(self,Status):
self.add_query_param('Status',Status) | [
"[email protected]"
] | |
dd825169a7e52a6ad884d02fdd6500cf7257a189 | 3ea99519e25ec1bb605947a94b7a5ceb79b2870a | /modern_python/modernpython/lib/mypy/typeshed/stdlib/3.4/tracemalloc.pyi | 462b03ca1c46a82c921e5591f87aaf07cc326d2c | [] | no_license | tech-cow/spazzatura | 437c7502a0654a3d3db2fd1e96ce2e3e506243c0 | 45fc0932186d2ef0c5044745a23507a692cfcc26 | refs/heads/master | 2022-09-01T12:01:11.309768 | 2018-11-15T04:32:03 | 2018-11-15T04:32:03 | 130,414,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | pyi | # Stubs for tracemalloc (Python 3.4+)
import sys
from typing import Any, List, Optional, Sequence, Tuple, Union
def clear_traces() -> None: ...
def get_object_traceback(obj: object) -> Optional[Traceback]: ...
def get_traceback_limit() -> int: ...
def get_traced_memory() -> Tuple[int, int]: ...
def get_tracemalloc_memory() -> int: ...
def is_tracing() -> bool: ...
def start(nframe: int = ...) -> None: ...
def stop() -> None: ...
def take_snapshot() -> Snapshot: ...
if sys.version_info >= (3, 6):
class DomainFilter:
inclusive = ... # type: bool
domain = ... # type: int
def __init__(self, inclusive: bool, domain: int) -> None: ...
class Filter:
if sys.version_info >= (3, 6):
domain = ... # type: Optional[int]
inclusive = ... # type: bool
lineno = ... # type: Optional[int]
filename_pattern = ... # type: str
all_frames = ... # type: bool
def __init__(self, inclusive: bool, filename_pattern: str, lineno: Optional[int] = ..., all_frames: bool = ..., domain: Optional[int] = ...) -> None: ...
class Frame:
filename = ... # type: str
lineno = ... # type: int
class Snapshot:
def compare_to(self, old_snapshot: Snapshot, key_type: str, cumulative: bool = ...) -> List[StatisticDiff]: ...
def dump(self, filename: str) -> None: ...
if sys.version_info >= (3, 6):
def filter_traces(self, filters: Sequence[Union[DomainFilter, Filter]]) -> Snapshot: ...
else:
def filter_traces(self, filters: Sequence[Filter]) -> Snapshot: ...
@classmethod
def load(cls, filename: str) -> Snapshot: ...
def statistics(self, key_type: str, cumulative: bool = ...) -> List[Statistic]: ...
traceback_limit = ... # type: int
traces = ... # type: Sequence[Trace]
class Statistic:
count = ... # type: int
size = ... # type: int
traceback = ... # type: Traceback
class StatisticDiff:
count = ... # type: int
count_diff = ... # type: int
size = ... # type: int
size_diff = ... # type: int
traceback = ... # type: Traceback
class Trace:
size = ... # type: int
traceback = ... # type: Traceback
class Traceback(Sequence[Frame]):
def format(self, limit: Optional[int] = ...) -> List[str]: ...
| [
"[email protected]"
] | |
69d1f1d6c3c6c4c911b19d88c4953d469307148f | 1e0ae1f039668a65e480065d671235fc0fff9b52 | /s11day2/backend/b2.py | bb0b3891a46e924deb3ce5866e6a53261cb4ac70 | [] | no_license | aixocm/svndata | a4da91c3c9e1d376abfd46e7cecc3c5c2e340e83 | ee205301f3a1ce11acef98bba927877cb7c4fb0b | refs/heads/master | 2021-01-21T04:39:41.607117 | 2016-07-01T01:48:36 | 2016-07-01T01:48:36 | 47,066,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,613 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#date:2016-1-11
import time
import hashlib
import pickle
import ConfigParser
import day
info = {}
def regiest():
global info
while True:
username = raw_input('please input your username:')
passwd = raw_input('please input your password:')
if not (username and passwd):
print 'your username or password is empty'
continue
else:
with open('user.txt','a+') as f:
string = f.read()
if string == "":
hash = hashlib.md5('JJ')
hash.update(passwd)
ret = hash.hexdigest()
info[username]=[ret,15000]
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
print 'regiest is sucessful!'
day.month_fun(username)
# dayday.dayday_fun(username)
else:
f=open('user.txt','rb')
info = pickle.load(f)
f.close()
if username in info.keys():
print 'This user is already exist!'
continue
else:
hash = hashlib.md5('JJ')
hash.update(passwd)
ret = hash.hexdigest()
info[username]=[ret,15000]
f = open('user.txt','ab')
pickle.dump(info,f)
f.close()
print 'regiest is sucessful!'
day.month_fun(username)
# dayday.dayday_fun(username)
def login():
global info
global username
f=open('user.txt','rb')
info = pickle.load(f)
f.close()
username = raw_input('please input your name:')
passwd = raw_input('please input your password:')
if username not in info.keys():
print 'please regiest!'
regiest()
else:
hash = hashlib.md5('JJ')
hash.update(passwd)
ret = hash.hexdigest()
if username in info.keys() and ret in info[username][0]:
print 'login successful!'
return True
else:
print 'login is failure'
return False
def get_money(username):
global info
if info[username][1] < 0:
print 'sorry,please the money'
else:
num = int(raw_input('please input your money num:'))
if info[username][1] - num*1.05 >= 0:
info[username][1] -= num*1.05
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
print 'get money is sucessful!'
else:
print 'sorry,you get money is too much'
def return_money(username):
global info
value_add=int(raw_input('please input your money:'))
with open('add.txt','a+') as f:
value=int(f.read())
with open('add.txt','wb') as f:
f.write(value+value_add)
info[username][1]=15000+value_add+value-Sum
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
print '已充值!'
def account_list(username):
global info
global Sum
with open('add.txt','a+') as f:
value=f.read()
if value == "":
value=0
with open('add.txt','wb') as f:
f.write(value)
config = ConfigParser.ConfigParser()
config.read(username)
month_list=config.sections()
Sum=0
for month in month_list:
key_list=config.items(month)
print month,"的账单如下:"
for opt in key_list:
print opt[0],":",opt[1]
Sum+=config.getint(month,opt[0])
if 15000 - Sum + int(value) < 0:
print '你已经欠款',15000 - Sum + int(value)
k=raw_input('if you add your money,please input A')
if k == 'A':
return_money(username)
else:
print 'your input is error'
else:
info[username][1]-=Sum
f = open('user.txt','wb')
pickle.dump(info,f)
f.close()
def M
flag = raw_input("if you regiest,please input 'R',if you login,please input 'L':")
if flag == 'R':
regiest()
elif flag == 'L':
if login():
select = raw_input("get money input G,query your account_list input Q:")
if select == 'G':
get_money(username)
if select =='Q':
if int(time.time()) > 0:
account_list(username)
else:
print 'date is do not to'
else:
print 'your input is error'
Main()
| [
"[email protected]"
] | |
8436d7f1670a2ef115032f4ab329e743426fbc8a | f08d137b7821d79672c91e5f06967ffa1f90e278 | /.history/Python/Main_py_20211020222353.py | b9f2db00c2e594197b452733fbbbb1d47c75cb3b | [] | no_license | anhviet-key/hello-cac-ban | a39ffb1731a77dd171523ea145f5d8b62fccde7c | 18411b51add7e3277d42869f8a50c67111337983 | refs/heads/main | 2023-08-23T09:02:01.074958 | 2021-10-27T07:48:47 | 2021-10-27T07:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | from Sub_py import num2
print("Hello, Word")
num2()
| [
"[email protected]"
] | |
ded09e44634a6c69a66cad4017e63a15258b6b35 | 67d4025c8b006a4342ce78bf4831d50432a4ed7a | /flow controls/current.py | b858a670d03af0b448bdb3d27cb8ffeb47d14c81 | [] | no_license | Anju-PT/pythonfilesproject | fd21484e98dc6c27fd9fd00feea0ede76ca8db17 | fee8351a1610921700ee15ea88c6f765fe3d21cd | refs/heads/master | 2023-04-29T18:13:46.490301 | 2021-05-19T08:06:51 | 2021-05-19T08:06:51 | 368,791,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | #year
#month
#date
#birth year
#month
#date
#print year
cyear=int(input("enter current year"))
cmonth=int(input("enter current month"))
cdate=int(input("enter current date"))
byear=int(input("enter birth year"))
bmonth=int(input("enter birth month"))
bdate=int(input("enter birth date"))
dyear=cyear-byear
dmonth=cmonth-bmonth
ddate=cdate-bdate
if(dyear>=1):
if(dmonth>=0):
if(ddate>=0):
print("age:",dyear,"years",dmonth,"months",ddate,"days")
else:
dmonth=dmonth-1
ddate=31+ddate
print("age:",dyear,"years",dmonth,"months",ddate,"days")
elif(dmonth<0):
if(ddate>=0):
dyear=dyear-1
dmonth=12+dmonth
#ddate=ddate
print("age:",dyear,"years",dmonth,"months",ddate,"days")
else:
dyear=dyear-1
dmonth-=1
dmonth=12+dmonth
ddate=31+ddate
print("age:",dyear,"years",dmonth,"months",ddate,"days")
else:
print("error")
elif(dyear==0):
if(dmonth<=0):
if(ddate<=0):
print("error")
else:
print(dyear,"years",dmonth,"months",ddate,"days old")
elif(dmonth>0):
if(ddate<=0):
dmonth=dmonth-1
ddate=31+ddate
print(dyear,"years",dmonth,"months",ddate,"days")
else:
print(dyear,"years",dmonth,"months",ddate, "days")
else:
print("error")
else:
print("invalid date of birth") | [
"[email protected]"
] | |
dc151184a7be117c9ba30d0fd60a18f811bcf66d | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/candidate_sampling_ops.py | 7731e73922876cf6de3e9aa25344c6f7d4f4dcf1 | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/candidate_sampling_ops.py | [
"[email protected]"
] | |
abb2c0c6e5d8bcdcb3ecb44cf721b9ea22719323 | 641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2 | /third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py | 2c41cd2a18d5e92fc59baa5618929090f74565d0 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] | permissive | massnetwork/mass-browser | 7de0dfc541cbac00ffa7308541394bac1e945b76 | 67526da9358734698c067b7775be491423884339 | refs/heads/master | 2022-12-07T09:01:31.027715 | 2017-01-19T14:29:18 | 2017-01-19T14:29:18 | 73,799,690 | 4 | 4 | BSD-3-Clause | 2022-11-26T11:53:23 | 2016-11-15T09:49:29 | null | UTF-8 | Python | false | false | 14,619 | py | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates a fake TestExpectations file consisting of flaky tests from the bot
corresponding to the give port.
"""
import json
import logging
import os.path
import urllib
import urllib2
from webkitpy.layout_tests.models.test_expectations import TestExpectations, PASS
from webkitpy.layout_tests.models.test_expectations import TestExpectationLine
_log = logging.getLogger(__name__)
class ResultsJSON(object):
"""Contains the contents of a results.json file.
results.json v4 format:
{
'version': 4,
'builder name' : {
'blinkRevision': [],
'tests': {
'directory' { # Each path component is a dictionary.
'testname.html': {
'expected' : 'FAIL', # Expectation name.
'results': [], # Run-length encoded result.
'times': [],
'bugs': [], # Bug URLs.
}
}
}
}
'buildNumbers': [],
'secondsSinceEpoch': [],
'chromeRevision': [],
'failure_map': {} # Map from letter code to expectation name.
}
"""
TESTS_KEY = 'tests'
FAILURE_MAP_KEY = 'failure_map'
RESULTS_KEY = 'results'
EXPECTATIONS_KEY = 'expected'
BUGS_KEY = 'bugs'
RLE_LENGTH = 0
RLE_VALUE = 1
# results.json was originally designed to support
# multiple builders in one json file, so the builder_name
# is needed to figure out which builder this json file
# refers to (and thus where the results are stored)
def __init__(self, builder_name, json_dict):
self.builder_name = builder_name
self._json = json_dict
def _walk_trie(self, trie, parent_path):
for name, value in trie.items():
full_path = os.path.join(parent_path, name)
# FIXME: If we ever have a test directory self.RESULTS_KEY
# ("results"), this logic will break!
if self.RESULTS_KEY not in value:
for path, results in self._walk_trie(value, full_path):
yield path, results
else:
yield full_path, value
def walk_results(self, full_path=''):
tests_trie = self._json[self.builder_name][self.TESTS_KEY]
return self._walk_trie(tests_trie, parent_path='')
def expectation_for_type(self, type_char):
return self._json[self.builder_name][self.FAILURE_MAP_KEY][type_char]
# Knowing how to parse the run-length-encoded values in results.json
# is a detail of this class.
def occurances_and_type_from_result_item(self, item):
return item[self.RLE_LENGTH], item[self.RLE_VALUE]
class BotTestExpectationsFactory(object):
RESULTS_URL_PREFIX = (
'http://test-results.appspot.com/testfile?master=chromium.webkit&'
'testtype=webkit_tests&name=results-small.json&builder=')
def __init__(self, builders):
self.builders = builders
def _results_json_for_port(self, port_name, builder_category):
builder = self.builders.builder_name_for_port_name(port_name)
if not builder:
return None
return self._results_json_for_builder(builder)
def _results_json_for_builder(self, builder):
results_url = self.RESULTS_URL_PREFIX + urllib.quote(builder)
try:
_log.debug('Fetching flakiness data from appengine.')
return ResultsJSON(builder, json.load(urllib2.urlopen(results_url)))
except urllib2.URLError as error:
_log.warning('Could not retrieve flakiness data from the bot. url: %s', results_url)
_log.warning(error)
def expectations_for_port(self, port_name, builder_category='layout'):
# FIXME: This only grabs release builder's flakiness data. If we're running debug,
# when we should grab the debug builder's data.
# FIXME: What should this do if there is no debug builder for a port, e.g. we have
# no debug XP builder? Should it use the release bot or another Windows debug bot?
# At the very least, it should log an error.
results_json = self._results_json_for_port(port_name, builder_category)
if not results_json:
return None
return BotTestExpectations(results_json, self.builders)
def expectations_for_builder(self, builder):
results_json = self._results_json_for_builder(builder)
if not results_json:
return None
return BotTestExpectations(results_json, self.builders)
class BotTestExpectations(object):
# FIXME: Get this from the json instead of hard-coding it.
RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y'] # NO_DATA, SKIP, NOTRUN
# TODO(ojan): Remove this once crbug.com/514378 is fixed.
# The JSON can contain results for expectations, not just actual result types.
NON_RESULT_TYPES = ['S', 'X'] # SLOW, SKIP
# specifiers arg is used in unittests to avoid the static dependency on builders.
def __init__(self, results_json, builders, specifiers=None):
self.results_json = results_json
self.specifiers = specifiers or set(builders.specifiers_for_builder(results_json.builder_name))
def _line_from_test_and_flaky_types(self, test_path, flaky_types):
line = TestExpectationLine()
line.original_string = test_path
line.name = test_path
line.filename = test_path
line.path = test_path # FIXME: Should this be normpath?
line.matching_tests = [test_path]
line.bugs = ["crbug.com/FILE_A_BUG_BEFORE_COMMITTING_THIS"]
line.expectations = sorted(flaky_types)
line.specifiers = self.specifiers
return line
def flakes_by_path(self, only_ignore_very_flaky):
"""Sets test expectations to bot results if there are at least two distinct results."""
flakes_by_path = {}
for test_path, entry in self.results_json.walk_results():
flaky_types = self._flaky_types_in_results(entry, only_ignore_very_flaky)
if len(flaky_types) <= 1:
continue
flakes_by_path[test_path] = sorted(flaky_types)
return flakes_by_path
def unexpected_results_by_path(self):
"""For tests with unexpected results, returns original expectations + results."""
def exp_to_string(exp):
return TestExpectations.EXPECTATIONS_TO_STRING.get(exp, None).upper()
def string_to_exp(string):
# Needs a bit more logic than the method above,
# since a PASS is 0 and evaluates to False.
result = TestExpectations.EXPECTATIONS.get(string.lower(), None)
if not result is None:
return result
raise ValueError(string)
unexpected_results_by_path = {}
for test_path, entry in self.results_json.walk_results():
# Expectations for this test. No expectation defaults to PASS.
exp_string = entry.get(self.results_json.EXPECTATIONS_KEY, u'PASS')
# All run-length-encoded results for this test.
results_dict = entry.get(self.results_json.RESULTS_KEY, {})
# Set of expectations for this test.
expectations = set(map(string_to_exp, exp_string.split(' ')))
# Set of distinct results for this test.
result_types = self._all_types_in_results(results_dict)
# Distinct results as non-encoded strings.
result_strings = map(self.results_json.expectation_for_type, result_types)
# Distinct resulting expectations.
result_exp = map(string_to_exp, result_strings)
expected = lambda e: TestExpectations.result_was_expected(e, expectations, False)
additional_expectations = set(e for e in result_exp if not expected(e))
# Test did not have unexpected results.
if not additional_expectations:
continue
expectations.update(additional_expectations)
unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations))
return unexpected_results_by_path
def all_results_by_path(self):
"""Returns all seen result types for each test.
Returns a dictionary from each test path that has a result to a list of distinct, sorted result
strings. For example, if the test results are as follows:
a.html IMAGE IMAGE PASS PASS PASS TIMEOUT PASS TEXT
b.html PASS PASS PASS PASS PASS PASS PASS PASS
c.html
This method will return:
{
'a.html': ['IMAGE', 'TEXT', 'TIMEOUT', 'PASS'],
'b.html': ['PASS'],
}
"""
results_by_path = {}
for test_path, entry in self.results_json.walk_results():
results_dict = entry.get(self.results_json.RESULTS_KEY, {})
result_types = self._all_types_in_results(results_dict)
if not result_types:
continue
# Distinct results as non-encoded strings.
result_strings = map(self.results_json.expectation_for_type, result_types)
results_by_path[test_path] = sorted(result_strings)
return results_by_path
def expectation_lines(self, only_ignore_very_flaky):
lines = []
for test_path, entry in self.results_json.walk_results():
flaky_types = self._flaky_types_in_results(entry, only_ignore_very_flaky)
if len(flaky_types) > 1:
line = self._line_from_test_and_flaky_types(test_path, flaky_types)
lines.append(line)
return lines
def _all_types_in_results(self, run_length_encoded_results):
results = set()
for result_item in run_length_encoded_results:
_, result_types = self.results_json.occurances_and_type_from_result_item(result_item)
for result_type in result_types:
if result_type not in self.RESULT_TYPES_TO_IGNORE:
results.add(result_type)
return results
def _result_to_enum(self, result):
return TestExpectations.EXPECTATIONS[result.lower()]
def _flaky_types_in_results(self, results_entry, only_ignore_very_flaky):
flaky_results = set()
# Always include pass as an expected result. Passes will never turn the bot red.
# This fixes cases where the expectations have an implicit Pass, e.g. [ Slow ].
latest_expectations = [PASS]
if self.results_json.EXPECTATIONS_KEY in results_entry:
expectations_list = results_entry[self.results_json.EXPECTATIONS_KEY].split(' ')
latest_expectations += [self._result_to_enum(expectation) for expectation in expectations_list]
for result_item in results_entry[self.results_json.RESULTS_KEY]:
_, result_types_str = self.results_json.occurances_and_type_from_result_item(result_item)
result_types = []
for result_type in result_types_str:
# TODO(ojan): Remove this if-statement once crbug.com/514378 is fixed.
if result_type not in self.NON_RESULT_TYPES:
result_types.append(self.results_json.expectation_for_type(result_type))
# It didn't flake if it didn't retry.
if len(result_types) <= 1:
continue
# If the test ran as expected after only one retry, it's not very flaky.
# It's only very flaky if it failed the first run and the first retry
# and then ran as expected in one of the subsequent retries.
# If there are only two entries, then that means it failed on the first
# try and ran as expected on the second because otherwise we'd have
# a third entry from the next try.
if only_ignore_very_flaky and len(result_types) == 2:
continue
has_unexpected_results = False
for result_type in result_types:
result_enum = self._result_to_enum(result_type)
# TODO(ojan): We really should be grabbing the expected results from the time
# of the run instead of looking at the latest expected results. That's a lot
# more complicated though. So far we've been looking at the aggregated
# results_small.json off test_results.appspot, which has all the information
# for the last 100 runs. In order to do this, we'd need to look at the
# individual runs' full_results.json, which would be slow and more complicated.
# The only thing we lose by not fixing this is that a test that was flaky
# and got fixed will still get printed out until 100 runs have passed.
if not TestExpectations.result_was_expected(result_enum, latest_expectations, test_needs_rebaselining=False):
has_unexpected_results = True
break
if has_unexpected_results:
flaky_results = flaky_results.union(set(result_types))
return flaky_results
| [
"[email protected]"
] | |
c2f220b3b5f0bf91bfad97fd447ebc7840a3f497 | f04fb8bb48e38f14a25f1efec4d30be20d62388c | /牛客Top200/41最长无重复子数组.py | 925b34c978b77ae75f52c81087d78fbe3d6366fb | [] | no_license | SimmonsChen/LeetCode | d8ef5a8e29f770da1e97d295d7123780dd37e914 | 690b685048c8e89d26047b6bc48b5f9af7d59cbb | refs/heads/master | 2023-09-03T01:16:52.828520 | 2021-11-19T06:37:19 | 2021-11-19T06:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | class Solution:
# 用队列:当前数字在队列中,则一直出队直到当前元素不在队中,然后将当前元素入队。
# 每入队一个元素都要计算当前队长度,返回值即是队列长度的最大值。
def maxLength(self, arr):
n = len(arr)
if n < 2: return n
queue = []
ans = 0
for number in arr:
while number in queue:
queue.pop(0)
queue.append(number)
ans = max(ans, len(queue))
return ans
if __name__ == '__main__':
s = Solution()
print(s.maxLength([2, 2, 3, 4, 3]))
| [
"[email protected]"
] | |
ac60c8f095376835613db058072b466b085377e0 | e5ebf8d209c2720fa2cb123541a90d30a0e6c364 | /Web_Program/venv/Scripts/pip3-script.py | f12831ab8c696f096993b5f0c1f66522c71baf1e | [
"Apache-2.0"
] | permissive | Incipe-win/Games | f235482d98fbfb8f98c120e11604973c6115bb79 | 902e6c7631fa75b57cfabf0e8be8f8d8486ec432 | refs/heads/main | 2022-12-25T17:31:21.968194 | 2020-10-09T13:42:53 | 2020-10-09T13:42:53 | 302,651,724 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | #!F:\Repositories\C_Cplusplus\Games\Web_Program\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
df1e1b2efa5dc475ee5b76e61cad469f2164c2bf | 052a89753a7917b7fa0ccdf5718d5250a1379d2c | /bin/explode.py | 792bb4609f5f71183a72a0d26597362977ad7e7b | [] | no_license | bopopescu/aws.example.com | 25e2efda3bd9ae2a257c34904ccb53043fe20b55 | 97254868688c3c3a991843fcacc973c93b366700 | refs/heads/master | 2022-11-22T07:06:30.386034 | 2016-10-25T15:22:14 | 2016-10-25T15:22:14 | 282,553,417 | 0 | 0 | null | 2020-07-26T01:22:26 | 2020-07-26T01:22:25 | null | UTF-8 | Python | false | false | 2,499 | py | #!/Users/deanarmada/Desktop/projects/python-projects/aws.example.com/bin/python
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
| [
"[email protected]"
] | |
36d1293350f5b67bf202eb4d0c02875b0fb70e92 | 63f6ce221f6ac10f33761b6e57cf47725fd3d1cb | /08_Inteligencia_Artificial/01_Mineracao_de_Emocoes_em_Textos/base.py | d5d26c33f96c0e78d5a32f8964b53b1eae32c865 | [] | no_license | iamferreirajp/python-notebook | 7dcc4305fec2a133a28b5449d77d486dcaca2f5f | c9056024bc7a7715db3607ec00f886a7b9eaf0c0 | refs/heads/master | 2020-05-03T09:54:43.612078 | 2019-03-24T07:07:21 | 2019-03-24T07:07:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | base = [
('eu sou admirada por muitos','alegria'),
('me sinto completamente amado','alegria'),
('amar e maravilhoso','alegria'),
('estou me sentindo muito animado novamente','alegria'),
('eu estou muito bem hoje','alegria'),
('que belo dia para dirigir um carro novo','alegria'),
('o dia está muito bonito','alegria'),
('estou contente com o resultado do teste que fiz no dia de ontem','alegria'),
('o amor e lindo','alegria'),
('nossa amizade e amor vai durar para sempre', 'alegria'),
('estou amedrontado', 'medo'),
('ele esta me ameacando a dias', 'medo'),
('isso me deixa apavorada', 'medo'),
('este lugar e apavorante', 'medo'),
('se perdermos outro jogo seremos eliminados e isso me deixa com pavor', 'medo'),
('tome cuidado com o lobisomem', 'medo'),
('se eles descobrirem estamos encrencados', 'medo'),
('estou tremendo de medo', 'medo'),
('eu tenho muito medo dele', 'medo'),
('estou com medo do resultado dos meus testes', 'medo')
]
stopwords = [
'a', 'agora', 'algum', 'alguma', 'aquele', 'aqueles', 'de', 'deu', 'do', 'e', 'estou', 'esta', 'esta',
'ir', 'meu', 'muito', 'mesmo', 'no', 'nossa', 'o', 'outro', 'para', 'que', 'sem', 'talvez', 'tem', 'tendo',
'tenha', 'teve', 'tive', 'todo', 'um', 'uma', 'umas', 'uns', 'vou'
] | [
"[email protected]"
] | |
1d985001aa8cb066283ba9bf7d051e057fa13f97 | a204895c89b15eab24d68b5340da440419966bcc | /rea/apps.py | 086161dfe20fc8b3cf22684419423465d07f66bf | [
"MIT"
] | permissive | Ecotrust/regional-equity-atlas | c98855be70aa35cebff48d94d89e2cbd2077cfe2 | 66911330ea29ac6ed2ba5b172e9171f22d995756 | refs/heads/master | 2020-03-23T06:18:13.817698 | 2018-07-31T20:08:02 | 2018-07-31T20:08:02 | 141,202,099 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | from django.apps import AppConfig
class ReaConfig(AppConfig):
name = 'rea'
| [
"[email protected]"
] | |
8064cfc5c46bbf7fac14576f49088c5a516c311e | bc441bb06b8948288f110af63feda4e798f30225 | /patch_manager_sdk/model/collector_center/job_v2_pb2.py | 3368edc9a9c54ad07768d21bca07f442ac5ce755 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 18,224 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: job_v2.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from patch_manager_sdk.model.collector_center import script_pb2 as patch__manager__sdk_dot_model_dot_collector__center_dot_script__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='job_v2.proto',
package='collector_center',
syntax='proto3',
serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/collector_center'),
serialized_pb=_b('\n\x0cjob_v2.proto\x12\x10\x63ollector_center\x1a\x35patch_manager_sdk/model/collector_center/script.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xe9\x05\n\tCollJobV2\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08interval\x18\x02 \x01(\x05\x12\x0f\n\x07timeout\x18\x03 \x01(\x05\x12\x11\n\ttimeRange\x18\x04 \x01(\t\x12\x13\n\x0bjobFilePath\x18\x05 \x03(\t\x12\x0f\n\x07version\x18\x06 \x01(\t\x12\x32\n\x06target\x18\x07 \x01(\x0b\x32\".collector_center.CollJobV2.Target\x12\x0c\n\x04name\x18\x08 \x01(\t\x12(\n\x06script\x18\t \x01(\x0b\x32\x18.collector_center.Script\x12\x0e\n\x06\x64\x61taId\x18\n \x01(\x05\x12#\n\x03\x65nv\x18\x0b \x01(\x0b\x32\x16.google.protobuf.Value\x12&\n\x06kwargs\x18\x0c \x01(\x0b\x32\x16.google.protobuf.Value\x12\x0b\n\x03\x66un\x18\r \x01(\t\x12\x0f\n\x07\x63lazzId\x18\x0e \x01(\t\x12\x11\n\tclazzName\x18\x0f \x01(\t\x12\x10\n\x08\x63onfigId\x18\x10 \x01(\t\x12\x16\n\x0erequiredFields\x18\x11 \x03(\t\x12\x10\n\x08\x63\x61\x63heTtl\x18\x12 \x01(\x05\x12\x15\n\rignoreInvalid\x18\x13 \x01(\x08\x12\x0e\n\x06labels\x18\x14 \x03(\t\x12\x10\n\x08\x64isabled\x18\x15 \x01(\x08\x12\x0b\n\x03org\x18\x16 \x01(\x05\x12\x0f\n\x07\x63reator\x18\x17 \x01(\t\x12\x10\n\x08modifier\x18\x18 \x01(\t\x12\r\n\x05\x63time\x18\x19 \x01(\x05\x12\r\n\x05mtime\x18\x1a \x01(\x05\x12\x10\n\x08objectId\x18\x1b \x01(\t\x12\x12\n\ninstanceId\x18\x1c \x01(\t\x1a\x90\x01\n\x06Target\x12\n\n\x02id\x18\x01 \x01(\t\x12?\n\tagentHost\x18\x02 \x01(\x0b\x32,.collector_center.CollJobV2.Target.AgentHost\x1a\x39\n\tAgentHost\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\n\n\x02ip\x18\x02 \x01(\t\x12\x0c\n\x04uuid\x18\x03 \x01(\tBLZJgo.easyops.local/contracts/protorepo-models/easyops/model/collector_centerb\x06proto3')
,
dependencies=[patch__manager__sdk_dot_model_dot_collector__center_dot_script__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_COLLJOBV2_TARGET_AGENTHOST = _descriptor.Descriptor(
name='AgentHost',
full_name='collector_center.CollJobV2.Target.AgentHost',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='collector_center.CollJobV2.Target.AgentHost.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip', full_name='collector_center.CollJobV2.Target.AgentHost.ip', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uuid', full_name='collector_center.CollJobV2.Target.AgentHost.uuid', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=808,
serialized_end=865,
)
_COLLJOBV2_TARGET = _descriptor.Descriptor(
name='Target',
full_name='collector_center.CollJobV2.Target',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='collector_center.CollJobV2.Target.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agentHost', full_name='collector_center.CollJobV2.Target.agentHost', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COLLJOBV2_TARGET_AGENTHOST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=721,
serialized_end=865,
)
_COLLJOBV2 = _descriptor.Descriptor(
name='CollJobV2',
full_name='collector_center.CollJobV2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='collector_center.CollJobV2.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interval', full_name='collector_center.CollJobV2.interval', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timeout', full_name='collector_center.CollJobV2.timeout', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timeRange', full_name='collector_center.CollJobV2.timeRange', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobFilePath', full_name='collector_center.CollJobV2.jobFilePath', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='collector_center.CollJobV2.version', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target', full_name='collector_center.CollJobV2.target', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='collector_center.CollJobV2.name', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='script', full_name='collector_center.CollJobV2.script', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataId', full_name='collector_center.CollJobV2.dataId', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='env', full_name='collector_center.CollJobV2.env', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kwargs', full_name='collector_center.CollJobV2.kwargs', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fun', full_name='collector_center.CollJobV2.fun', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clazzId', full_name='collector_center.CollJobV2.clazzId', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clazzName', full_name='collector_center.CollJobV2.clazzName', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configId', full_name='collector_center.CollJobV2.configId', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requiredFields', full_name='collector_center.CollJobV2.requiredFields', index=16,
number=17, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cacheTtl', full_name='collector_center.CollJobV2.cacheTtl', index=17,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ignoreInvalid', full_name='collector_center.CollJobV2.ignoreInvalid', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='collector_center.CollJobV2.labels', index=19,
number=20, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disabled', full_name='collector_center.CollJobV2.disabled', index=20,
number=21, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='collector_center.CollJobV2.org', index=21,
number=22, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='collector_center.CollJobV2.creator', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='modifier', full_name='collector_center.CollJobV2.modifier', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='collector_center.CollJobV2.ctime', index=24,
number=25, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='collector_center.CollJobV2.mtime', index=25,
number=26, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='collector_center.CollJobV2.objectId', index=26,
number=27, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='collector_center.CollJobV2.instanceId', index=27,
number=28, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COLLJOBV2_TARGET, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=120,
serialized_end=865,
)
_COLLJOBV2_TARGET_AGENTHOST.containing_type = _COLLJOBV2_TARGET
_COLLJOBV2_TARGET.fields_by_name['agentHost'].message_type = _COLLJOBV2_TARGET_AGENTHOST
_COLLJOBV2_TARGET.containing_type = _COLLJOBV2
_COLLJOBV2.fields_by_name['target'].message_type = _COLLJOBV2_TARGET
_COLLJOBV2.fields_by_name['script'].message_type = patch__manager__sdk_dot_model_dot_collector__center_dot_script__pb2._SCRIPT
_COLLJOBV2.fields_by_name['env'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_COLLJOBV2.fields_by_name['kwargs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
DESCRIPTOR.message_types_by_name['CollJobV2'] = _COLLJOBV2
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollJobV2 = _reflection.GeneratedProtocolMessageType('CollJobV2', (_message.Message,), {
'Target' : _reflection.GeneratedProtocolMessageType('Target', (_message.Message,), {
'AgentHost' : _reflection.GeneratedProtocolMessageType('AgentHost', (_message.Message,), {
'DESCRIPTOR' : _COLLJOBV2_TARGET_AGENTHOST,
'__module__' : 'job_v2_pb2'
# @@protoc_insertion_point(class_scope:collector_center.CollJobV2.Target.AgentHost)
})
,
'DESCRIPTOR' : _COLLJOBV2_TARGET,
'__module__' : 'job_v2_pb2'
# @@protoc_insertion_point(class_scope:collector_center.CollJobV2.Target)
})
,
'DESCRIPTOR' : _COLLJOBV2,
'__module__' : 'job_v2_pb2'
# @@protoc_insertion_point(class_scope:collector_center.CollJobV2)
})
_sym_db.RegisterMessage(CollJobV2)
_sym_db.RegisterMessage(CollJobV2.Target)
_sym_db.RegisterMessage(CollJobV2.Target.AgentHost)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
63ab6446bacc30dd89798f16300dc8f1a0ff5314 | bffd457e17dc250c81d7bd9e25c20a483f1a1ed5 | /pandatools/results/ResultMerge_vehicle.py | a79cdd8a1a92074001b014b03305d8cb1a2fdca4 | [] | no_license | Harzva/gigavision | 3121f183b8cfc66f9c89f4afe1bd0bdf1c1fe104 | 1fb1ad9b9d5aac6c18dc83184a52c484964be7fb | refs/heads/master | 2023-02-20T10:34:21.735085 | 2021-01-24T10:07:30 | 2021-01-24T10:07:30 | 332,416,570 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 31,144 | py | # --------------------------------------------------------
# Result merge modules for PANDA
# Written by Wang Xueyang ([email protected]), Version 20200321
# Inspired from DOTA dataset devkit (https://github.com/CAPTAIN-WHU/DOTA_devkit)
# --------------------------------------------------------
from nms import py_cpu_nms,py_cpu_softnms,set_cpu_nms
from d2det.ops.nms.nms_wrapper import soft_nms
import os
import numpy as np
from tqdm import tqdm
import json
from collections import defaultdict
import random
from demo_visual import merge_result_visual
from concurrent.futures.thread import ThreadPoolExecutor
from ensemble_boxes import nms
import json
import os
try:
import xml.etree.cElementTree as ET #解析xml的c语言版的模块
except ImportError:
import xml.etree.ElementTree as ET
class DetResMerge():
def __init__(self,
resfile,
splitannofile,
srcannofile,
outpath,
npyname,
test,
imgfilters,
isload_npy=False,
imgext='.jpg',
code='utf-8',
ext =2 # Pixel extension
):
"""
:param basepath: base directory for panda image data and annotations
:param resfile: detection result file path
:param splitannofile: generated split annotation file
:param srcannofile: source annotation file
:param resmode: detection result mode, which can be 'person', 'vehicle', 'headbbox' or 'headpoint'
:param outpath: output base path for merged result file
:param outfile: name for merged result file
:param imgext: ext for the split image format
"""
self.resfile = resfile
self.splitannofile = splitannofile
self.srcannofile = srcannofile
self.imgfilters=imgfilters
self.outpath = outpath
self.imgext = imgext
self.code = code
self.respath = resfile
self.test=test
self.subimg_width = 1536
self.subimg_height = 1536
self.splitannopath = splitannofile
self.srcannopath = srcannofile
# self.imagepaths = util.GetFileFromThisRootDir(self.imgpath, ext='jpg')
if not os.path.exists(self.outpath):
os.makedirs(self.outpath)
self.results = defaultdict(list)
self.npyname=npyname
self.ext = ext
self.isload_npy=isload_npy
if self.isload_npy:
print(f"load /root/data/gvision/CrowdDet-master/model/rcnn_emd_refine/outputs/coco_results/{self.npyname}.npy")
self.results =np.load(f"/root/data/gvision/CrowdDet-master/model/rcnn_emd_refine/outputs/coco_results/{self.npyname}.npy",allow_pickle=True)
else:
self.indexResults()
def keep_dets(self,dets,UP_boundary,temp):
imgWidth,imgHeight=temp[0],temp[1]
'''
:dets: frame_id,left,up,scale,x,y,w,h,score,_ columns=10
:return dets in small frame [left,up,scale,x,y,w,h,score,_] columns = 9
'''
keep_dets = []
keep_values=[]
print(dets[0])
_,left,up,scale,_,_,_,_,_,_,_ = dets[0]
right = left + int(self.subimg_width/scale)
down = up + int(self.subimg_height/scale)
if up == UP_boundary:
if left == 0: # left_up_corner ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if x+w >= self.subimg_width-self.ext or y+h >= self.subimg_height-self.ext:# if is out of right or down bound
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
elif right >= imgWidth-1: # right_up_corner ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if x <= 0+self.ext or y+h >= self.subimg_height-self.ext:# if is out of left or down bound
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
else: # up_bound ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if x <= 0+self.ext or y+h >= self.subimg_height-self.ext or x+w >= self.subimg_width-self.ext:# if is out of left or down or right bound
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
elif left == 0:
if down >= imgHeight-10: # left_down_corner ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if y <= 0+self.ext or x+w >= self.subimg_width-self.ext: # if is out of up or right bound
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
else: # left_bound ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if y <= 0+self.ext or x+w >= self.subimg_width-self.ext or y+h >= self.subimg_height-self.ext:# if is out of up or right or down bound
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
elif down >= imgHeight-10: ####################################
if right >= imgWidth-1: # right_down_corner ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if x <= 0+self.ext or y <= 0+self.ext:
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
else: # down_bound ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if x <= 0+self.ext or y <= 0+self.ext or x+w >= self.subimg_width-self.ext:
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
elif right >= imgWidth-1: # right_broud ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if x <= 0+self.ext or y <= 0+self.ext or y+h >= self.subimg_height-self.ext:
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
else: # inner_part ============
for det in dets:
_,left,up,scale,x,y,w,h,score,number,category_id= det
if x <= 0+self.ext or y <= 0+self.ext or x+w >= self.subimg_width-self.ext or y+h >= self.subimg_height-self.ext: ##################################
continue
else:
keep_values.append([left,up,scale])
keep_dets.append( {'image_id':number, 'bbox': [x,y,w,h], 'score':score, 'category_id': category_id})
if len(keep_dets)>0:
return keep_dets,keep_values
else:
return None
def indexResults(self):
print('Loading result json file: {}'.format(self.respath))
with open(self.respath, 'r') as load_f:
reslist = json.load(load_f)
print("bboxex_num",len(reslist))#498
print('Loading split annotation json file: {}'.format(self.splitannopath))
with open(self.splitannopath, 'r') as load_f:
splitanno = json.load(load_f)
indexedresults = defaultdict(list)
if self.test:
tempannos={}
imgfilters=self.imgfilters
# imgfilters=["15_24"]
for imgfilter in imgfilters:
tempannos.update({i:j for i,j in splitanno.items() if imgfilter in i })
splitanno=tempannos
def say(iss):
filename, annodict=iss[0],iss[1]
imageid = annodict['image id']
# print("imageid",imageid)
for resdict in reslist:
resimageid = resdict['image_id']
if resimageid == imageid:
# print("1111",resdict) {'image_id': 253, 'category_id': 1, 'bbox': [981.3349609375, 322.8221435546875, 22.030517578125, 32.01666259765625], 'score': 0.16039377450942993}
# print("2222",resimageid)
# print("1111",type(resdict))
# print("2222",type(resimageid))
indexedresults[filename].append(resdict)
return indexedresults
# print("splitanno",splitanno)
executor = ThreadPoolExecutor(max_workers=1)
func_var = [[file_name,dict_value] for file_name,dict_value in splitanno.items()]
pbar = tqdm(total=len(splitanno), ncols=50)
for temp in executor.map(say,func_var):
# print(temp)
indexedresults.update(temp)
pbar.update(1)
pbar.close()
self.results = indexedresults
np.save(f"/root/data/gvision/CrowdDet-master/model/rcnn_emd_refine/outputs/coco_results/{self.npyname}.npy",indexedresults )
print("save ***index.npy*** as :",f"/root/data/gvision/CrowdDet-master/model/rcnn_emd_refine/outputs/coco_results/{self.npyname}.npy")
def mergeResults(self,outfile,merge_input_mode="xywh",is_nms=True,nms_thresh=0.9,nms_name="nms"):
"""
:param is_nms: do non-maximum suppression on after merge
:param nms_thresh: non-maximum suppression IoU threshold
:return:
"""
print('Loading source annotation json file: {}'.format(self.srcannopath))
with open(self.srcannopath, 'r') as load_f:
srcanno = json.load(load_f)
mergedresults = defaultdict(list)
# random.seed(0)
# print("len",len(self.results))
keep_input=[]
for (filename, objlist) in self.results.items():
# for (filename, objlist) in random.sample(self.results.items(),2):
# srcfile, scale, left, up = filename.split('___')
# srcfile =srcfile.replace('_IMG', '/IMG')+".jpg"
# up =up[:-4]
# # print(filename, objlist)
# # srcimageid =srcfile[-2:]
# # print(srcfile)
# srcimageid = srcanno[srcfile]['image id']
# print("srcimageid",srcimageid)
# print(srcfile, scale, left, up )
srcfile, paras = filename.split('___')#srcfile, paras 15_Nanshani_Park_IMG_15_04 0.5__4224__6144.jpg
# print("srcfile, paras",srcfile, paras )
srcfile = srcfile.replace('_IMG', '/IMG') + self.imgext#02_Xili_Crossroad_IMG_02_01___0.5__0__0.jpg
srcimageid = srcanno[srcfile]['image id']
scale, left, up = paras.replace(self.imgext, '').split('__')#scale, left, up 0.5 4224 6144
# print(srcfile, scale, left, up )
# print(f"before objlist {len(objlist)}")
if not iskeep_dets:
for objdict in objlist:
mergedresults[srcimageid].append([*recttransfer(objdict['bbox'], float(scale), int(left), int(up),merge_input_mode),objdict['score'], objdict['category_id'],objdict['image_id']])
if iskeep_dets:
keep_input=keep_input+[[srcimageid,int(left), int(up),float(scale),i['bbox'][0],i['bbox'][1],i['bbox'][2],i['bbox'][3],i['score'],i['image_id'],i['category_id']] for i in objlist]
# keep_input.append([[srcimageid,int(left), int(up),float(scale),i['bbox'][0],i['bbox'][1],i['bbox'][2],i['bbox'][3],i['score'],i['image_id'],i['category_id']] for i in objlist])
if iskeep_dets:
print(f"before keep {len(keep_input)}")
if 391<=srcimageid<=420:#14otcUP_boundary
keep_dets,_keep_values=self.keep_dets(np.array(keep_input),UP_boundary[0],PANDA_TEST_SIZE[0])
if 421<=srcimageid<=450:#15 nanshangongyuan
keep_dets,_keep_values=self.keep_dets(np.array(keep_input),UP_boundary[1],PANDA_TEST_SIZE[1])
if 451<=srcimageid<=465:#16xiaoxue----------01
keep_dets,_keep_values=self.keep_dets(np.array(keep_input),UP_boundary[2],PANDA_TEST_SIZE[2])
if 466<=srcimageid<=480:#16xiaoxue--------02
keep_dets,_keep_values=self.keep_dets(np.array(keep_input),UP_boundary[3],PANDA_TEST_SIZE[2])
if 481<=srcimageid<=510:#17zhongguan
keep_dets,_keep_values=self.keep_dets(np.array(keep_input),UP_boundary[4],PANDA_TEST_SIZE[3])
if 511<=srcimageid<=540:#18xilin-------01
keep_dets,_keep_values=self.keep_dets(np.array(keep_input),UP_boundary[5],PANDA_TEST_SIZE[4])
if 541<=srcimageid<=555:#18xilin----------02
keep_dets,_keep_values=self.keep_dets(np.array(keep_input),UP_boundary[6],PANDA_TEST_SIZE[5])
print(f"after keep {len(keep_dets)}")
for objdict,keep_value in zip(keep_dets,_keep_values):
left, up,scale=keep_value[0],keep_value[1],keep_value[2]
# print("objdict",objdict)#{'image_id': 7110, 'bbox': [47.7, 866.2, 198.8, 442.8], 'score': 0.83231, 'category_id': 1}
mergedresults[srcimageid].append([*recttransfer(objdict['bbox'], float(scale), int(left), int(up),merge_input_mode),
objdict['score'], objdict['category_id'],objdict['image_id']])
img_size = {}
for anno in srcanno:
# print(srcanno[anno]['image id'])
img_size[srcanno[anno]['image id']] = srcanno[anno]['image size']
if is_nms:
if nms_name=="nms":
for (imageid, objlist) in mergedresults.items():
masxlist=[i[2]*i[3] for i in objlist]
max_wh=np.max(masxlist)
objlist=[[i[0],i[1],i[2],i[3],i[4]*0.05+i[3]*i[2]*0.95/max_wh,i[5],i[6]] for i in objlist ]
keep = py_cpu_nms(np.array(objlist), nms_thresh)
outdets = []
for index in keep:
outdets.append(objlist[index])
mergedresults[imageid] = outdets
if nms_name=="setnms":
for (imageid, objlist) in mergedresults.items():
print("input nms element",objlist[0])#[829, 5939, 923, 6000, 0.24672751128673553, 1, 149]
print(f"before setnms {nms_thresh} ",len(objlist))
keep=np.array(objlist)[set_cpu_nms(np.array(objlist), nms_thresh)].tolist()
# print("keep",keep,"\n",len(keep),type(keep))
print(f"{imageid} after setnms_{nms_thresh} ",len(keep))
mergedresults[imageid] = keep
if nms_name=="emnms":
for (imageid, objlist) in mergedresults.items():
size_anno = img_size[imageid]
boxes = [[obj[0] / size_anno['width'], obj[1] / size_anno['height'],
obj[2] / size_anno['width'], obj[3] / size_anno['height']] for obj in objlist]
scores = [obj[4] for obj in objlist]
labels = [obj[5] for obj in objlist]
boxes, scores, labels = nms([boxes], [scores], [labels])
boxes[:, [0, 2]] *= size_anno['width']
boxes[:, [1, 3]] *= size_anno['height']
outdets = [x[0] + [x[1], x[2]] for x in zip(boxes.tolist(), scores.tolist(), labels.tolist())]
mergedresults[imageid] = outdets
if nms_name=="softnms":
for (imageid, objlist) in mergedresults.items():
print(f"{imageid} before softnms_{nms_thresh} ",len(objlist))
# print("ssss",len(objlist[0]))
# print("ssss",objlist[0])
masxlist=[i[2]*i[3] for i in objlist]
max_wh=np.max(masxlist)
objlist=[[i[0],i[1],i[2],i[3],i[4]*0.05+i[3]*i[2]*0.95/max_wh,i[5],i[6]] for i in objlist ]
# tempmax=np.max(np.array(objlist)[:, 4])
# print("max",tempmax)#208909381.05317593
# objlist=[[i[0],i[1],i[2],i[3],i[4]/(tempmax+0.00001),i[5],i[6]] for i in objlist ]
# print(objlist)
newdets,keep=soft_nms(np.array(objlist),iou_thr=nms_thresh, method='linear',sigma=0.5, min_score=1e-3)#'gaussian''linear',
# keep =py_cpu_softnms(np.array(objlist),thresh=nms_thresh, Nt=0.02, sigma=0.5, method=1)
# print(keep)
outdets = []
for index in keep:
outdets.append(objlist[index])
print(f"{imageid} after softnms_{nms_thresh} ",len(keep))
mergedresults[imageid] = outdets
savelist = []
def say2(iss):
imageid, objlist=iss[0],iss[1]
# print(imageid, objlist)
templist=[]
for obj in objlist:#obj [22528, 1270, 24576, 1, 1.0, 4]
# print(obj)
templist.append({
"image_id": imageid,
"category_id": obj[5],
"bbox": tlbr2tlwh(obj[:4]),
"score": obj[4]
})
if test:
print(f"fliter berfore len {len(templist)}")
if isfliter:
if 391<=imageid<=420:#14otc
templist=fliter(templist,fliterscore["14_OCT"],AnotPath="/root/data/gvision/dataset/xml/14_OCT_Habour.xml",
segma_woh=3,segma_area=3,up_bound=4000,down_bound=None,down_fs=0.95,yichang=0)
if 421<=imageid<=450:#15 nanshangongyuan
templist=fliter(templist,fliterscore["15_nanshan"],AnotPath="/root/data/gvision/dataset/xml/15_Nanshani_Park.xml",
segma_woh=3,segma_area=3,up_bound=1500,down_bound=7000,down_fs=None,yichang=0)
if 451<=imageid<=465:#16xiaoxue----------01
templist=fliter(templist,fliterscore["1601_shool"],AnotPath="/root/data/gvision/dataset/xml/IMG_16_01_head.xml",
segma_woh=3,segma_area=3,up_bound=0,down_bound=None,down_fs=None,yichang=0)
if 466<=imageid<=480:#16xiaoxue--------02
templist=fliter(templist,fliterscore["1602_shool"],AnotPath="/root/data/gvision/dataset/xml/IMG_16_25_02_.xml",
segma_woh=3,segma_area=3,up_bound=0,down_bound=None,down_fs=None,yichang=0)
if 481<=imageid<=510:#17zhongguan
templist=fliter(templist,fliterscore["17_newzhongguan"],AnotPath="/root/data/gvision/dataset/xml/17_New_Zhongguan.xml",
segma_woh=3,segma_area=3,up_bound=6000,down_bound=7000,down_fs=None,yichang=0)
if 511<=imageid<=540:#18xilin-------01
templist=fliter(templist,fliterscore["1801_xilin"],AnotPath="/root/data/gvision/dataset/xml/IMG_18_01_01.xml",
segma_woh=3,segma_area=3,up_bound=4000,down_bound=None,down_fs=None,yichang=0)
if 541<=imageid<=555:#18xilin----------02
templist=fliter(templist,fliterscore["1802_xilin"],AnotPath="/root/data/gvision/dataset/xml/IMG_18_02.xml",
segma_woh=3,segma_area=3,up_bound=4000,down_bound=None,down_fs=None,yichang=0)
if isdel_inter:
templist=del_inter(templist)
if test:
print(f"del_inter after len {len(templist)}")
return templist
executor = ThreadPoolExecutor(max_workers=80)
func_var = [[file_name,dict_value] for file_name,dict_value in mergedresults.items()]
print("merge bbox into self'image start ")
pbar2= tqdm(total=len(mergedresults), ncols=50)
for temp in executor.map(say2,func_var):
savelist+=temp
pbar2.update(1)
pbar2.close()
with open(os.path.join(self.outpath, outfile), 'w', encoding=self.code) as f:
dict_str = json.dumps(savelist, indent=2)
f.write(dict_str)
print(f"save ***results*** json :{os.path.join(self.outpath, outfile)}")
def fliter(rectange_list,fliterscore,AnotPath,segma_woh,segma_area,up_bound,down_bound,down_fs,yichang):
print("fliter start before ",len(rectange_list))
if yichang:
dis_woh=[i['bbox'][3]/(i['bbox'][2]+0.0000001) for i in rectange_list]
dis_area=[i['bbox'][3]*i['bbox'][2] for i in rectange_list]
u_woh=np.mean(dis_woh)
std_woh=np.std(dis_woh)*segma_woh
u_area=np.mean(dis_area)
std_area=np.std(dis_area)*segma_area
# print("fliter outlier before ",len(rectange_list))
rectange_list=[i for i in rectange_list if (u_woh-std_woh<i['bbox'][3]/(i['bbox'][2]+0.0000001)<u_woh+std_woh and u_area-std_area<i['bbox'][3]/(i['bbox'][2]+0.0000001)<u_area+std_area and i['bbox'][1]>up_bound)]
rectange_list=[i for i in rectange_list if i['score'] >fliterscore]
if test:
print("fliter outlier after ",len(rectange_list))
rectange_list=GetAnnotBoxLoc(AnotPath,rectange_list)
if down_bound and down_fs:
rectange_list=[i for i in rectange_list if (i['bbox'][1]>down_bound and i['score']>down_fs) or i['bbox'][1]<down_bound]
if test:
print("fliter end after",len(rectange_list))
return rectange_list
def del_inter(rectange_list):#AnotPath VOC标注文件路径
backlist=[]
# print(f"forbid zone before {len(rectange_list)}")
# pbar3= tqdm(total=len(rectange_list), ncols=50)
for a in rectange_list:
i=a["bbox"]
left,up,right,down=int(i[0]),int(i[1]),int(i[0]+i[2]),int(i[3]+i[1])
templist=[]
inter_xml=np.zeros(len(rectange_list),dtype=float)
for id,k in enumerate(rectange_list):
j=k["bbox"]
xmin,ymin,xmax,ymax=int(j[0]),int(j[1]),int(j[0]+j[2]),int(j[3]+j[1])
if xmax <= left or right <= xmin or ymax <= up or down <= ymin:
intersection = 0
else:
lens = min(xmax, right) - max(xmin, left)
wide = min(ymax, down) - max(ymin, up)
intersection = lens * wide
# print("*"*6,intersection)
# print(i[2]*i[3])
inter_xml[id]=intersection/(i[2]*i[3]+0.00001)
# print(np.where(inter_xml<0.99)[0].shape[0])
if np.where(inter_xml<0.99999)[0].shape[0]==len(rectange_list)-1:
backlist.append(a)
# pbar3.update(1)
# pbar3.close()
# else:
# print(np.where(inter_xml==0)[0].shape[0]==len(ObjectSet))
# print("del")
# print(f"forbid zone after {len(backlist)}")
# return backlist+templist
return backlist
def GetAnnotBoxLoc(AnotPath,rectange_list):#AnotPath VOC标注文件路径
tree = ET.ElementTree(file=AnotPath) #打开文件,解析成一棵树型结构
root = tree.getroot()#获取树型结构的根
ObjectSet=root.findall('object')#找到文件中所有含有object关键字的地方,这些地方含有标注目标
backlist=[]
# print(f"forbid zone before {len(rectange_list)}")
for a in rectange_list:
i=a["bbox"]
left,up,right,down=i[0],i[1],i[0]+i[2],i[3]+i[1]
templist=[]
inter_xml=np.zeros(len(ObjectSet),dtype=float)
for k,Object in enumerate(ObjectSet):
BndBox=Object.find('bndbox')
xmin= int(BndBox.find('xmin').text)#-1 #-1是因为程序是按0作为起始位置的
ymin= int(BndBox.find('ymin').text)#-1
xmax= int(BndBox.find('xmax').text)#-1
ymax= int(BndBox.find('ymax').text)#-1
templist.append({
"image_id": 481,
"category_id": 1,
"bbox": [xmin,ymin,xmax-xmin,ymax-ymin],
"score":1
})
if xmax <= left or right <= xmin or ymax <= up or down <= ymin:
intersection = 0
else:
lens = min(xmax, right) - max(xmin, left)
wide = min(ymax, down) - max(ymin, up)
intersection = lens * wide
# print("*"*60,intersection)
# print(i[2]*i[3])
inter_xml[k]=intersection/(i[2]*i[3]+0.00001)
if np.where(inter_xml<0.05)[0].shape[0]==len(ObjectSet):#则没有与bbox相交的xmlforbidzone < param or ==0
backlist.append(a)
# else:
# print(np.where(inter_xml==0)[0].shape[0]==len(ObjectSet))
# print("del")
# print(f"forbid zone after {len(backlist)}")
# return backlist+templist17_newzhongguan
return backlist
def recttransfer(rect, scale, left, up,merge_input_mode):
if merge_input_mode=="xyxy":
xmin, ymin, xmax, ymax = rect
if merge_input_mode=="xywh":
xmin, ymin, w, h = rect
xmax, ymax = xmin + w, ymin + h
# return [int(temp / scale) for temp in [xmin + left, ymin + up, xmax + left, ymax + up]]
return [int(temp) for temp in [xmin/scale + left, ymin/scale + up, xmax/scale + left, ymax/scale + up]]
def tlbr2tlwh(rect):
xmin, ymin, xmax, ymax = rect
w, h = xmax - xmin, ymax - ymin
return [xmin, ymin, w, h]
def vehicle():
"""
global variable
"""
global PANDA_TEST_SIZE,fliterscore,UP_boundary,iskeep_dets,test,isdel_inter,isfliter
PANDA_TEST_SIZE=[
[26573,15052], # 9
[32609,24457], # 10
[31760,23810],
[26583,14957],
[26583,14957],
[26573,15052]]
UP_boundary=[4500,1417,0,0,5357,4205,52]
#是否use keep_dets()
iskeep_dets=0
isfliter=1#fliter score xml
isdel_inter=1#
test=0
fliterscore={"14_OCT":0.0,"15_nanshan":0,"1601_shool":0.2,"1602_shool":0.3,"17_newzhongguan":0,"1801_xilin":0.1,"1802_xilin":0}
nms_name="nms"
nms_thresh=0.2
network="detectors"
weights=""
cls_="vehicle"#head
dataset="else"
resfile="/root/data/gvision/mmdetection-master/workdir/detectors_vehicle_method2/output/coco_bicycle_and_panda_else/split_reuslt_detectros_else_bbox.json"
# resfile="/root/data/gvision/mmdetection-master/workdir/detectors_vehicle_method2/output/coco_bicycle_and_panda_else/split_reuslt_detectros_else_big_bbox.json"
# resfile="/root/data/gvision/mmdetection-master/workdir/detectors_vehicle_method2/output/epoch_12_car/split_reuslt_detectros_car_big_bbox.json"
# resfile="/root/data/gvision/mmdetection-master/workdir/detectors_vehicle_method2/output/epoch_12_car/split_reuslt_detectros_car_17_bbox.json"
# resfile="/root/data/gvision/mmdetection-master/workdir/detectors_vehicle_method2/output/epoch_12_car/split_reuslt_detectros_car_without17_bbox.json"
splitannofile='/root/data/rubzz/ruby/ruby_output/test/vehicle/method3_bbox_small_else_noresize.json'
# splitannofile='/root/data/rubzz/ruby/ruby_output/test/person/split_test_method2_bigimageto1536.json'
# splitannofile="/root/data/rubzz/ruby/ruby_output/test/vehicle/test_bbox_vehicle_1333_1238_17_noresize.json"
# splitannofile="/root/data/rubzz/ruby/ruby_output/test/vehicle/test_bbox_vehicle_1333_1238_without17.json"
# imgfilters=["14_02","16_02" ,"16_14","16_16","15_24","17_02" ,"18_40","18_01","17_01"]
# imgfilters=["15_24","14_02","17_01","17_011","17_015","17_26","17_27"]
imgfilters=["15_27","14_02","16_01","17_23","18_01"]
outfile=f"{network}{weights}_delinter{isdel_inter}isfliter{isfliter}{cls_}dataset{dataset}{nms_name}{nms_thresh}.json"#save image prefix
outfile="car_without_17.json"
image_prefix=f'delinter{isdel_inter}isfliter{isfliter}{nms_name}{nms_thresh}'
savepath=f"/root/data/gvision/my_merge/{cls_}/visual/{network}_{dataset}"
#softnms nms setnms emnms
outpath=f"/root/data/gvision/my_merge/{cls_}/coco_results/detectors"
merge=DetResMerge(resfile=resfile,outpath=outpath,
# splitannofile="/root/data/rubzz/ruby/ruby_output/test/person/split_test_method2_person.json",
splitannofile=splitannofile,
srcannofile="/root/data/gvision/dataset/raw_data/image_annos/person_bbox_test.json",
# srcannofile="/root/data/gvision/dataset/predict/17_01/image_annos/1701.json",
npyname=outfile[:-4]+f"{nms_name}",
test=test,
imgfilters=imgfilters,
isload_npy=False,
)
merge.mergeResults(outfile=outfile,is_nms=True,nms_thresh=nms_thresh,nms_name=nms_name)
# merge.mergeResults(merge_input_mode="xywh",is_nms=True,nms_thresh=0.9)
merge_result_visual(image_folder_test="/root/data/gvision/dataset/raw_data/image_test",
result_path=os.path.join(outpath,outfile),
annos_path="/root/data/gvision/dataset/raw_data/image_annos/person_bbox_test.json",
savepath=savepath,
output_prefix=image_prefix,
imgfilters=imgfilters,
test=test,
mode="xywh",
num=10)#mode: input_bbox mode
def main():
vehicle()
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
73cb855b2bd5a9631ecd288feca60ceac271a3a9 | 291ab4b5b1b99d0d59ce2fb65efef04b84fd78bd | /tmp_testdir/postgresDB/test5b_manual_dbtable_insert_date_time.py | ca1b04857efca706986da6e210cb68cdea5f6927 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | cromox1/Trading212 | 15b5ea55d86e7063228f72dd92525e1fca693338 | 68f9b91098bc9184e16e9823a5e07e6b31e59602 | refs/heads/main | 2023-04-17T23:03:07.078229 | 2021-05-05T23:02:54 | 2021-05-05T23:02:54 | 320,100,427 | 0 | 2 | null | 2021-04-13T07:03:41 | 2020-12-09T22:58:06 | Python | UTF-8 | Python | false | false | 2,736 | py | import psycopg2
import datetime
def connect_db(host, user, pswd):
conn = psycopg2.connect(host=host, user=user, password=pswd)
cur = conn.cursor()
return cur, conn
def connect_db_commit_close(cur, conn):
conn.commit()
cur.close()
conn.close()
def create_table_if_noexist(dbtable):
sqlcommand = """CREATE TABLE IF NOT EXISTS """ + dbtable + """
(currencyID int,
Currency varchar(10),
CurrentValue numeric(9,5),
CurrentMin numeric(9,5),
CurrentMax numeric(9,5),
CurrentAverage numeric(9,5),
Date date);"""
conn_db = connect_db("localhost", "postgres", "H0meBase")
cur = conn_db[0]
conn = conn_db[1]
print(sqlcommand)
cur.execute(sqlcommand)
connect_db_commit_close(cur, conn)
def put_values_to_dbtable(dbtable, values):
id = values[0]
currency = values[1]
valuex = values[2]
valuemin = values[3]
valuemax = values[4]
valueaverage = values[5]
date1 = values[6]
time1 = values[7]
date2 = datetime.datetime.strptime(date1 + ' ' + time1, '%Y-%m-%d %H:%M:%S')
print('date2 = ', type(date2))
# print('time = ', type(time))
sqlcommand = "INSERT INTO " + str(dbtable) \
+ '\n' + "SELECT DISTINCT " + '\n' + str(id) + ", \n'" + str(currency) + "', \n" + str(valuex) \
+ ", \n" + str(valuemin) + ", \n" + str(valuemax) + ", \n" + str(valueaverage) + ", \n" \
+ str(date2) \
+ "\nFROM " + str(dbtable) \
+ '\n' + "WHERE NOT EXISTS(SELECT DISTINCT currencyID FROM " + str(dbtable) \
+ " WHERE currencyID = " + str(id) + ");"
# + str(date) + ", \n'" + str(time) \
# sqlcommand = """INSERT INTO """ + str(dbtable) + """
# SELECT DISTINCT """ + str(id) + ',' + str(currency) + ',' + str(valuex) + ',' + str(valuemin) + ',' + \
# str( valuemax) + ',' + str(valueaverage) + ',' + str(date) \
# + ' FROM ' + str(dbtable) + """
# WHERE NOT EXISTS(SELECT DISTINCT PersonID FROM dailyfxcurrency WHERE id = """ + str(id) + """);"""
conn_db = connect_db("localhost", "postgres", "H0meBase")
cur = conn_db[0]
conn = conn_db[1]
print(sqlcommand)
cur.execute(sqlcommand)
connect_db_commit_close(cur, conn)
# TEST
dbtouse = 'dbtesttwo'
create_table_if_noexist(dbtouse)
# DATE = 2020-12-16 / 14:56
# GBP-USD = 1.354775 / min 1.336765 / max 1.354775 / avg 1.3462247916666668
# datatext = (20201216145600, 'GBP-USD' , 1.354775 , 1.336765 , 1.354775 , 1.3462247916666668 , "2020-12-16\n14:56:00")
datatext = (20201216145600, 'GBP-USD' , 1.35477 , 1.33676 , 1.35477 , 1.34622 , "2020-12-16", "14:56:00")
put_values_to_dbtable(dbtouse, datatext)
| [
"[email protected]"
] | |
6238e82437788028beba37bd4dc6074632053960 | bccfab4d853f7417401a084be95de293e66ccd2a | /mySpider/spiders/Museum72.py | 7cda6f18480e54909d7211b01dccc6d7a37022cd | [] | no_license | CS1803-SE/The-First-Subsystem | a8af03ce04a9de72a6b78ece6411bac4c02ae170 | 4829ffd6a83133479c385d6afc3101339d279ed6 | refs/heads/main | 2023-05-06T02:32:08.751139 | 2021-05-24T06:09:37 | 2021-05-24T06:09:37 | 363,400,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | #lay
from ..items import *
class Museum72(scrapy.Spider):
name = "Museum72"
allowed_domains = ['nbmuseum.cn']
start_urls = ['http://nbmuseum.cn/col/col41/index.html']
custom_settings = {
'ITEM_PIPELINES': {
'mySpider.pipelines.MuseumPipeLine': 300,
},
'DOWNLOADER_MIDDLEWARES': {
'mySpider.middlewares.DefaultMiddleware': 0,
},
}
def parse(self, response, **kwargs):
item = MuseumBasicInformationItem()
item["museumID"] = 72
item["museumName"] = "宁波博物院"
item["address"] = "宁波市鄞州区首南中路1000号"
# str(response.xpath(
# "/html/body/div[3]/div[2]/div/div[1]/div[7]/dl[1]/dd[3]/text()").extract_first()).replace("\n", "")
item["openingTime"] = "参观入场时间9:00—16:00,闭馆时间17:00。周一闭馆(国家法定节假日除外)。"
# str(response.xpath(
# "/html/body/div[3]/div[2]/div/div[1]/div[7]/dl[2]/dd[3]/text()").extract_first()).replace("\n", "")
item["consultationTelephone"] = "(0574)82815588"
item["publicityVideoLink"] = None
item["longitude"] = "121.551803"
item["latitude"] = "29.821188"
item["introduction"] = response.xpath(
'//*[@id="zoom"]/p[1]/text()').extract()
# str(response.xpath("/html/body/div[3]/div[2]/div/div[1]/div[4]/div[2]").xpath(
# "string(.)").extract_first()).split("\n")[0]
yield item
| [
"[email protected]"
] | |
03d6b9442657118869f4d81fda133e26f127d88b | 645cd832def2330ea923d69fcc82cf5b7719415b | /python/smqtk/algorithms/relevancy_index/__init__.py | 36a407c72a844017a2550d0d6d0e1b72ee4d9c2c | [] | no_license | mrG7/SMQTK | 3103b6c59e347930a330e3284b288cb7af20f3b6 | 19917babd63767726b1bc21a184a5006366b59af | refs/heads/master | 2021-01-20T11:35:21.861708 | 2015-10-02T21:50:44 | 2015-10-02T21:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,950 | py | import abc
import logging
from smqtk.algorithms import SmqtkAlgorithm
__author__ = "[email protected]"
class RelevancyIndex (SmqtkAlgorithm):
"""
Abstract class for IQR index implementations.
Similar to a traditional nearest-neighbors algorithm, An IQR index provides
a specialized nearest-neighbors interface that can take multiple examples of
positively and negatively relevant exemplars in order to produce a [0, 1]
ranking of the indexed elements by determined relevancy.
"""
def __len__(self):
return self.count()
@abc.abstractmethod
def count(self):
"""
:return: Number of elements in this index.
:rtype: int
"""
@abc.abstractmethod
def build_index(self, descriptors):
"""
Build the index based on the given iterable of descriptor elements.
Subsequent calls to this method should rebuild the index, not add to it.
:raises ValueError: No data available in the given iterable.
:param descriptors: Iterable of descriptor elements to build index over.
:type descriptors: collections.Iterable[smqtk.representation.DescriptorElement]
"""
@abc.abstractmethod
def rank(self, pos, neg):
"""
Rank the currently indexed elements given ``pos`` positive and ``neg``
negative exemplar descriptor elements.
:param pos: Iterable of positive exemplar DescriptorElement instances.
This may be optional for some implementations.
:type pos: collections.Iterable[smqtk.representation.DescriptorElement]
:param neg: Iterable of negative exemplar DescriptorElement instances.
This may be optional for some implementations.
:type neg: collections.Iterable[smqtk.representation.DescriptorElement]
:return: Map of indexed descriptor elements to a rank value between
[0, 1] (inclusive) range, where a 1.0 means most relevant and 0.0
meaning least relevant.
:rtype: dict[smqtk.representation.DescriptorElement, float]
"""
def get_relevancy_index_impls(reload_modules=False):
"""
Discover and return ``RelevancyIndex`` implementation classes found in the
given plugin search directory. Keys in the returned map are the names of the
discovered classes, and the paired values are the actual class type objects.
We look for modules (directories or files) that start with an alphanumeric
character ('_' prefixed files/directories are hidden, but not recommended).
Within a module we first look for a helper variable by the name
``RELEVANCY_INDEX_CLASS``, which can either be a single class object or
an iterable of class objects, to be exported. If the variable is set to
None, we skip that module and do not import anything. If the variable is not
present, we look for a class by the same name and casing as the module. If
neither are found, the module is skipped.
:param reload_modules: Explicitly reload discovered modules from source.
:type reload_modules: bool
:return: Map of discovered class object of type ``RelevancyIndex`` whose
keys are the string names of the classes.
:rtype: dict of (str, type)
"""
from smqtk.utils.plugin import get_plugins
import os
this_dir = os.path.abspath(os.path.dirname(__file__))
helper_var = "RELEVANCY_INDEX_CLASS"
def class_filter(cls):
log = logging.getLogger('.'.join([__name__,
'get_relevancy_index_impls',
'class_filter']))
if not cls.is_usable():
log.warn("Class type '%s' not usable, filtering out.",
cls.__name__)
return False
return True
return get_plugins(__name__, this_dir, helper_var, RelevancyIndex,
class_filter, reload_modules)
| [
"[email protected]"
] | |
3ee0b16a3fa268a4f70f364d9ab5bc3b4fed2794 | 6630694f401f6f475dd81bb01ff9368db844ccff | /configs/repvgg/repvgg-B2_8xb32_in1k.py | b9a7d4ca5570518f0c4d0b81951e0e97c46606f9 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 106 | py | _base_ = './repvgg-A0_8xb32_in1k.py'
model = dict(backbone=dict(arch='B2'), head=dict(in_channels=2560))
| [
"[email protected]"
] | |
e1bfa27cf691a0b6ee3d8d8a074da682433cef02 | f20931826a557f0d884f8b46de259840c29b7428 | /meiduo_mall/meiduo_mall/utils/authenticate.py | 2079f8c659a70dbebbb5291a0c6b7c9cbcc4867e | [] | no_license | zy723/meiduo_project | 38ccecc2fa1d61f2eb848ebc572dd43d45a534c8 | f50a8105c63554b57419cb3494c3d323bb343f9c | refs/heads/master | 2022-12-15T02:34:42.578549 | 2020-05-20T16:56:27 | 2020-05-20T16:56:27 | 248,264,846 | 0 | 0 | null | 2022-12-12T20:28:41 | 2020-03-18T15:08:40 | TSQL | UTF-8 | Python | false | false | 2,024 | py | """
增加支持管理员用户登录账号
JWT扩展的登录视图,在收到用户名与密码时,也是调用Django的认证系统中提供的authenticate()来检查用户名与密码是否正确。
我们可以通过修改Django认证系统的认证后端(主要是authenticate方法)来支持登录账号既可以是用户名也可以是手机号。
修改Django认证系统的认证后端需要继承django.contrib.auth.backends.ModelBackend,并重写authenticate方法。
authenticate(self, request, username=None, password=None, **kwargs)方法的参数说明:
request 本次认证的请求对象
username 本次认证提供的用户账号
password 本次认证提供的密码
我们想要让管理员用户才能登录我们的admin后台,这时我们就要修改django原有的用户验证方法。
重写authenticate方法的思路:
根据username参数查找用户User对象,在查询条件中在加上is_staff=True的条件
若查找到User对象,调用User对象的check_password方法检查密码是否正确
"""
from django.contrib.auth.backends import ModelBackend
from users.models import User
class MeiduoModelBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
# 判断是否通过vue组件发送请求
if request is None:
try:
user = User.objects.get(username=username, is_staff=True)
except:
return None
# 检查密码
if user.check_password(password):
return user
else:
# 变量username的值,可以是用户名,也可以是手机号,需要判断,再查询
try:
user = User.objects.get(username=username)
except:
# 如果未查到数据,则返回None,用于后续判断
return None
# 判断密码
if user.check_password(password):
return user
else:
return None
| [
"[email protected]"
] | |
9e819a87ebc1032db2785b8533da9e1f29cd9fe5 | f62e4c46fb0f98879fb63977fa29631b02e3928c | /16 задание/РекурсФункцСТекст_005.py | 555d533ac95655abf5e3a748654d72868d9b32bc | [] | no_license | SeveralCamper/USE-2020-2021 | c34f4d7a2c3e0f51529141781f523b63242a835d | ac1122649f2fd431a91af5dda5662492e2565109 | refs/heads/master | 2023-09-03T13:36:05.822568 | 2021-10-27T12:54:10 | 2021-10-27T12:54:10 | 392,303,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # Задание 16 № 9163
# Ниже на пяти языках программирования записан рекурсивный алгоритм F.
# Чему равна сумма всех чисел, напечатанных на экране при выполнении вызова F(1)?
count = 0
def F(n):
global count
count += n
print(n)
if n < 4:
F(n + 1)
F(n + 3)
print(F(1), count)
# Ответ: 25 | [
"[email protected]"
] | |
ea8178226894384605a4056cb9263a40ae392b4f | eecd9cb2117aee5f8a41a38e07697524c5908003 | /concourse/client/api.py | 6364f9311bca6d38d5e26f4f165613ef1eb30a26 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | adracus/cc-utils | 426608fb2c7184e5f68c9073da9e7f1535e025ce | dcd1ff544d8b18a391188903789d1cac929f50f9 | refs/heads/master | 2020-04-25T23:30:25.454654 | 2019-02-27T14:31:19 | 2019-02-27T14:31:19 | 173,148,221 | 0 | 0 | Apache-2.0 | 2019-02-28T16:36:09 | 2019-02-28T16:36:09 | null | UTF-8 | Python | false | false | 8,926 | py | # Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import warnings
from abc import abstractmethod
from ensure import ensure_annotations
from urllib3.exceptions import InsecureRequestWarning
from .routes import (
ConcourseApiRoutesBase,
)
from .model import (
Build,
BuildPlan,
BuildEvents,
SetPipelineResult,
PipelineConfig,
ResourceVersion,
)
from model.concourse import (
ConcourseTeamCredentials,
)
from http_requests import AuthenticatedRequestBuilder
from util import not_empty
warnings.filterwarnings('ignore', 'Unverified HTTPS request is being made.*', InsecureRequestWarning)
def select_attr(name: str):
return lambda o: o.get(name)
class ConcourseApiBase(object):
'''
Implements a subset of concourse REST API functionality.
After creation, `login` ought to be invoked at least once to allow for the
execution of requests that required autorization.
@param base_url: concourse endpoint (e.g. https://ci.concourse.ci)
@param team_name: the team name used for authentication
@param verify_ssl: whether or not certificate validation is to be done
'''
@ensure_annotations
def __init__(
self,
routes: ConcourseApiRoutesBase,
request_builder: AuthenticatedRequestBuilder,
verify_ssl=False,
):
self.routes = routes
self.request_builder = request_builder
self.verify_ssl = verify_ssl
@ensure_annotations
def _get(self, url: str):
return self.request_builder.get(url, return_type='json')
@ensure_annotations
def _put(self, url: str, body: str, headers={}, use_auth_token=True):
return self.request_builder.put(url, body=body, headers=headers)
@ensure_annotations
def _post(self, url: str, body: str="", headers={}):
return self.request_builder.post(url, body=body, headers=headers)
@ensure_annotations
def _delete(self, url: str):
return self.request_builder.delete(url)
@abstractmethod
def login(self, team: str, username: str, passwd: str):
raise NotImplementedError
@ensure_annotations
def set_pipeline(self, name: str, pipeline_definition):
previous_version = self.pipeline_config_version(name)
headers = {'x-concourse-config-version': previous_version}
url = self.routes.pipeline_cfg(name)
self._put(url, str(pipeline_definition), headers=headers)
return SetPipelineResult.CREATED if previous_version is None else SetPipelineResult.UPDATED
@ensure_annotations
def delete_pipeline(self, name: str):
url = self.routes.pipeline(pipeline_name=name)
self._delete(url)
def pipelines(self):
pipelines_url = self.routes.pipelines()
response = self._get(pipelines_url)
return map(select_attr('name'), response)
def order_pipelines(self, pipeline_names):
url = self.routes.order_pipelines()
self._put(url, json.dumps(pipeline_names))
@ensure_annotations
def pipeline_cfg(self, pipeline_name: str):
pipeline_cfg_url = self.routes.pipeline_cfg(pipeline_name)
response = self._get(pipeline_cfg_url)
not_empty(response)
return PipelineConfig(response, concourse_api=self, name=pipeline_name)
def pipeline_resources(self, pipeline_names):
if isinstance(pipeline_names, str):
pipeline_names = [pipeline_names]
resources = map(lambda name: self.pipeline_cfg(pipeline_name=name).resources, pipeline_names)
for resource_list in resources:
yield from resource_list
@ensure_annotations
def pipeline_config_version(self, pipeline_name: str):
pipeline_cfg_url = self.routes.pipeline_cfg(pipeline_name)
response = self.request_builder.get(
pipeline_cfg_url,
return_type=None,
check_http_code=False
)
if response.status_code == 404:
return None # pipeline did not exist yet
# ensure we did receive an error other than 404
self.request_builder._check_http_code(response, pipeline_cfg_url)
return response.headers['X-Concourse-Config-Version']
@ensure_annotations
def unpause_pipeline(self, pipeline_name: str):
unpause_url = self.routes.unpause_pipeline(pipeline_name)
self.request_builder.put(
unpause_url,
body=""
)
@ensure_annotations
def expose_pipeline(self, pipeline_name: str):
expose_url = self.routes.expose_pipeline(pipeline_name)
self.request_builder.put(
expose_url,
body="",
)
@ensure_annotations
def job_builds(self, pipeline_name: str, job_name: str):
'''
Returns a list of Build objects for the specified job.
The list is sorted by the build number, newest build last
'''
builds_url = self.routes.job_builds(pipeline_name, job_name)
response = self._get(builds_url)
builds = [Build(build_dict, self) for build_dict in response]
builds = sorted(builds, key=lambda b: b.id())
return builds
@ensure_annotations
def job_build(self, pipeline_name: str, job_name: str, build_name: str):
build_url = self.routes.job_build(pipeline_name, job_name, build_name)
response = self._get(build_url)
return Build(response, self)
@ensure_annotations
def trigger_build(self, pipeline_name: str, job_name: str):
trigger_url = self.routes.job_builds(pipeline_name, job_name)
self._post(trigger_url)
@ensure_annotations
def build_plan(self, build_id):
build_plan_url = self.routes.build_plan(build_id)
response = self._get(build_plan_url)
return BuildPlan(response, self)
@ensure_annotations
def build_events(self, build_id):
build_plan_url = self.routes.build_events(build_id)
# TODO: this request never seems to send an "EOF"
# (probably to support streaming)
# --> properly handle this special case
response = self.request_builder.get(
build_plan_url,
return_type=None,
stream=True # passed to sseclient
)
return BuildEvents(response, self)
@ensure_annotations
def trigger_resource_check(self, pipeline_name: str, resource_name: str):
url = self.routes.resource_check(pipeline_name=pipeline_name, resource_name=resource_name)
# Resource checks are triggered by a POST with an empty JSON-document as body against
# the resource's check-url
self._post(url, body='{}')
@ensure_annotations
def resource_versions(self, pipeline_name: str, resource_name: str):
url = self.routes.resource_versions(pipeline_name=pipeline_name, resource_name=resource_name)
response = self._get(url)
return [ResourceVersion(raw=raw, concourse_api=None) for raw in response]
class ConcourseApiV4(ConcourseApiBase):
def login(self, username: str, passwd: str):
login_url = self.routes.login()
form_data = "grant_type=password&password=" + passwd + \
"&scope=openid+profile+email+federated%3Aid+groups&username=" + username
response = self._post(
url=login_url,
body=form_data,
headers={"content-type": "application/x-www-form-urlencoded"}
)
auth_token = response.json()['access_token']
self.request_builder = AuthenticatedRequestBuilder(
auth_token=auth_token,
verify_ssl=self.verify_ssl
)
return auth_token
def set_team(self, team_credentials: ConcourseTeamCredentials):
body = {}
body['auth'] = {
"users": [
"local:" + team_credentials.username()
]
}
if team_credentials.has_github_oauth_credentials():
body['auth'].update({
"groups": [
"github:" + team_credentials.github_auth_team()
]
})
team_url = self.routes.team_url(team_credentials.teamname())
self._put(team_url, json.dumps(body))
| [
"[email protected]"
] | |
270526ead40fed7395ab36a2f0e5538850c9fcd5 | 43575c1324dc0760958a110d7f056bce88422a03 | /listing/arrayqueue.py | fe91f1bc669035d14da292523bec642ed81d0941 | [] | no_license | nicolas4d/Data-Structures-and-Algorithms-Using-Python | 1ffd74d26f09de2057bdc53998a56e56ed77c1de | a879ce6fd4033867783ee487d57d459b029eb5f8 | refs/heads/master | 2020-09-24T12:48:30.726766 | 2019-12-31T03:15:44 | 2019-12-31T03:15:44 | 225,761,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # Implementation of the Queue ADT using a circular array.
from array import Array
class Queue :
# Creates an empty queue.
def __init__( self, maxSize ) :
self._count = 0
self._front = 0
self._back = maxSize - 1
self._qArray = Array( maxSize )
# Returns True if the queue is empty.
def isEmpty( self ) :
return self._count == 0
# Returns True if the queue is full.
def isFull( self ) :
return self._count == len(self._qArray)
# Returns the number of items in the queue.
def __len__( self ) :
return self._count
# Adds the given item to the queue.
def enqueue( self, item ):
assert not self.isFull(), "Cannot enqueue to a full queue."
maxSize = len(self._qArray)
self._back = (self._back + 1) % maxSize
self._qArray[self._back] = item
self._count += 1
# Removes and returns the first item in the queue.
def dequeue( self ):
assert not self.isEmpty(), "Cannot dequeue from an empty queue."
item = self._qArray[ self._front ]
maxSize = len(self._qArray)
self._front = (self._front + 1) % maxSize
self._count -= 1
return item
| [
"[email protected]"
] | |
ac5953399a647183382fd235afa3078fcf3f2cf8 | 7c9f28e371e8dfa9290c05a48a9d924484b4b18c | /1.py | e742ff9920b98902a4a6413dc4ff9b62916929d0 | [] | no_license | Pavithralakshmi/corekata | 1f9d963da44a6fdcdedaf2e39452545f6cc52e9b | 06d1c7bba25681ce12e2ab93ce461228afb6b984 | refs/heads/master | 2021-04-30T01:53:37.414318 | 2018-10-11T17:58:39 | 2018-10-11T17:58:39 | 121,491,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | s1=input("eter anything")
s2=input("enter somthing")
print(s2)
| [
"[email protected]"
] | |
ed2bd9596b3c087bafd28769728ffae53934a728 | e12e1e738d06dbbcdb7f3d051614e7aa493f795d | /mysite/config/settings.py | 41635b0505022517d3d2ca9886160f51f35e9721 | [] | no_license | suhjohn/ec2-deploy-mysite | 34c13e1ae3ff33ca14a6223ee8036432ea98d460 | fb3c33cb64ecfa673f16da0385942f76bde748a1 | refs/heads/master | 2021-07-19T17:57:53.701059 | 2017-10-27T05:33:27 | 2017-10-27T05:33:27 | 108,366,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8!yz3f*(+w^kkhls0sl3)lfngzupjo(rsydyr2(89ci7!av(_w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'.ap-northeast-2.compute.amazonaws.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_DIR = os.path.join(BASE_DIR, "static")
#Django에서 정적파일을 검색하고 가져올 폴더 목
STATICFILES_DIRS = [
STATIC_DIR,
]
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
| [
"[email protected]"
] | |
c663cfef1a695d5be22587d9ff42d87025c79fdc | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/destination_display_variant_ref.py | 3255e0537fdd4c971133e232146768cd1aa74573 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 332 | py | from dataclasses import dataclass
from .destination_display_variant_ref_structure import DestinationDisplayVariantRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class DestinationDisplayVariantRef(DestinationDisplayVariantRefStructure):
class Meta:
namespace = "http://www.netex.org.uk/netex"
| [
"[email protected]"
] | |
3a2ad1e33b7dc2a198f28492c836efb94a98b834 | 3562fa51db47b1b1e97785191f0c04644d47c283 | /python/plat3/2152.py | 62489943de0aeb7ac35dc6bf78cf5e4aa950e1b3 | [] | no_license | seono/algorithm | c74181d564525e3a0214824c4a619c51cd52a042 | 78a252b29290eaa1ea076d76cd83e5dbbb7d8d89 | refs/heads/master | 2021-07-13T07:13:41.523888 | 2021-04-24T14:05:00 | 2021-04-24T14:05:00 | 244,609,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | import sys
from collections import deque
sys.setrecursionlimit(100000)
input = sys.stdin.readline
N, M, S, T = map(int,input().split())
adj = [[] for _ in range(N+1)]
for i in range(M):
s,t = map(int,input().split())
adj[s].append(t)
cnt,SN = 0,0
dfsn = [0]*(N+1)
scc_arr = []
scc_num = [0]*(N+1)
finished = [False]*(N+1)
st = []
def scc(idx):
global cnt,SN
dfsn[idx] = cnt+1
cnt+=1
st.append(idx)
result = dfsn[idx]
for nx in adj[idx]:
if dfsn[nx]==0:result = min(result,scc(nx))
elif not finished[nx]: result = min(result, dfsn[nx])
if result == dfsn[idx]:
curSCC = []
while True:
t = st.pop()
curSCC.append(t)
finished[t]=True
scc_num[t]=SN
if t==idx:break
scc_arr.append(curSCC)
SN+=1
return result
for i in range(1,N+1):
if dfsn[i]==0:scc(i)
new_adj = [[] for _ in range(SN)]
indgree = [0]*SN
finished = [0]*SN
new_s,new_t = scc_num[S],scc_num[T]
for i,tmp in enumerate(scc_arr):
for n in tmp:
for nx in adj[n]:
if scc_num[nx]==i:continue
new_adj[i].append(scc_num[nx])
indgree[scc_num[nx]]+=1
def dfs():
can = [False]*SN
can[new_s]=True
finished[new_s]=len(scc_arr[new_s])
q = deque([])
for i in range(SN):
if not indgree[i]: q.append(i)
while q:
n = q.popleft()
for nx in new_adj[n]:
if can[n]:
finished[nx]=max(finished[nx],finished[n]+len(scc_arr[nx]))
can[nx]=True
indgree[nx]-=1
if indgree[nx]==0:
q.append(nx)
return finished[new_t]
print(dfs()) | [
"[email protected]"
] | |
a32df99969cc2b00821ca9dfd9e146584b61aad7 | ed63b9b615c0f1484746e87b54a0c0b233ddf5c2 | /tests/test_parser.py | 90b755aedee63b62e364f59f3cb3c53381aaf2e0 | [
"MIT"
] | permissive | timofurrer/embedeval | ae02026da6cd5601b16afe1cbb543552cbf461ac | 08a69c950c9a0ac59a8a0ca728af2627c7bcc43a | refs/heads/master | 2020-07-29T10:41:56.322842 | 2020-01-20T10:32:46 | 2020-01-20T10:32:51 | 209,766,108 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,581 | py | """
embedeval
~~~~~~~~~
NLP Embedding Evaluation Tool
:copyright: (c) 2019 by Timo Furrer <[email protected]>
:license: MIT, see LICENSE for more details.
"""
import textwrap
import uuid
import numpy as np
import pytest
from embedeval.parsers.word2vec_gensim import load_embedding as gensim_load_embedding
from embedeval.parsers.word2vec_simple import load_embedding as simple_load_embedding
def create_tmp_word_embedding(path, embedding_content):
"""Create a temporary Word Embedding file"""
# FIXME(TF): maybe refactor interface so that file system can be avoided in unit tests.
created_file = path / str(uuid.uuid4())
with open(created_file, "w", encoding="utf-8") as embedding_file:
embedding_file.write(textwrap.dedent(embedding_content).strip())
return created_file
@pytest.mark.parametrize(
"load_embedding_func",
[
pytest.param(simple_load_embedding, id="simple parser"),
pytest.param(gensim_load_embedding, id="gensim parser"),
],
)
def test_should_parse_word2vec_with_single_entry(load_embedding_func, tmp_path):
"""Loading a Word2Vec Embedding should pass for single word"""
# GIVEN
word2vec_path = create_tmp_word_embedding(
tmp_path,
"""
1 2
word 1.0 2.0
""",
)
# WHEN
embedding = load_embedding_func(word2vec_path)
# THEN
assert embedding.get_words() == ["word"]
assert np.array_equal(embedding.get_word_vector("word"), np.array([1.0, 2.0]))
@pytest.mark.parametrize(
"load_embedding_func",
[
pytest.param(simple_load_embedding, id="simple parser"),
pytest.param(gensim_load_embedding, id="gensim parser"),
],
)
def test_should_parse_word2vec_with_multiple_entires(load_embedding_func, tmp_path):
"""Loading a Word2Vec Embedding should pass for multiple word entries"""
# GIVEN
word2vec_path = create_tmp_word_embedding(
tmp_path,
"""
4 2
word1 1.0 2.0
word2 3.0 4.0
word3 5.0 6.0
word4 7.0 8.0
""",
)
# WHEN
embedding = load_embedding_func(word2vec_path)
# THEN
assert embedding.get_words() == ["word1", "word2", "word3", "word4"]
assert np.array_equal(embedding.get_word_vector("word1"), np.array([1.0, 2.0]))
assert np.array_equal(embedding.get_word_vector("word2"), np.array([3.0, 4.0]))
assert np.array_equal(embedding.get_word_vector("word3"), np.array([5.0, 6.0]))
assert np.array_equal(embedding.get_word_vector("word4"), np.array([7.0, 8.0]))
| [
"[email protected]"
] | |
1e7fd967ad595fb9792cb574c9219de21724fb93 | ac652ff7636d4c3336918d0f96aa8ea1bba3ab28 | /fastvid/posts/serializers/postmodel.py | c0152d21e28e0e4c646800e2244e7b172f680400 | [] | no_license | pn101/fastvid | eebff58e9dd6b967a52361713ed34462e0713d88 | 9f57c577c558906e3fd5c3ab44f112588ae84ed2 | refs/heads/develop | 2021-01-20T18:58:34.398325 | 2016-07-05T09:29:49 | 2016-07-05T09:29:49 | 61,545,668 | 0 | 0 | null | 2016-06-20T13:14:42 | 2016-06-20T12:22:08 | null | UTF-8 | Python | false | false | 412 | py | from rest_framework import serializers
from posts.models import Post
class PostModelSerializer(serializers.ModelSerializer):
username = serializers.CharField(source='user.username')
class Meta:
model = Post
fields = [
'pk',
'username',
'title',
'content',
'youtube_original_url',
'youtube_embed_url',
]
| [
"[email protected]"
] | |
b4819e1ec3e683284917e6a9291f28ae1220f9c7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DELL-NETWORKING-TC.py | a5184bbe6b70262343039230a6f6eb6c4efb5c16 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 18,834 | py | #
# PySNMP MIB module DELL-NETWORKING-TC (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DELL-NETWORKING-TC
# Produced by pysmi-0.3.4 at Wed May 1 12:37:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
dellNetModules, = mibBuilder.importSymbols("DELL-NETWORKING-SMI", "dellNetModules")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, ObjectIdentity, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Counter64, MibIdentifier, iso, Gauge32, TimeTicks, Bits, Counter32, NotificationType, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Counter64", "MibIdentifier", "iso", "Gauge32", "TimeTicks", "Bits", "Counter32", "NotificationType", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
dellNetTextualConventions = ModuleIdentity((1, 3, 6, 1, 4, 1, 6027, 4, 2))
dellNetTextualConventions.setRevisions(('2009-04-07 12:00', '2008-09-16 12:00', '2008-09-02 12:00', '2007-06-28 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: dellNetTextualConventions.setRevisionsDescriptions(('Added new Nemesis card type.', 'Added ExaScale chassis mode and Nemesis card type.', 'Added DellNetCardOperStatus.', 'Added DellNetChassisType and DellNetHundredthdB.',))
if mibBuilder.loadTexts: dellNetTextualConventions.setLastUpdated('200904071200Z')
if mibBuilder.loadTexts: dellNetTextualConventions.setOrganization('Dell Inc')
if mibBuilder.loadTexts: dellNetTextualConventions.setContactInfo('http://www.dell.com/support')
if mibBuilder.loadTexts: dellNetTextualConventions.setDescription('The Textual Convention of Dell Networking OS MIB.')
class DellNetChassisType(TextualConvention, Integer32):
description = 'Dell Networking OS chassis type.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48))
namedValues = NamedValues(("e1200", 1), ("e600", 2), ("e300", 3), ("e150", 4), ("e610", 5), ("c150", 6), ("c300", 7), ("e1200i", 8), ("s2410cp", 9), ("s2410p", 10), ("s50", 11), ("s50e", 12), ("s50v", 13), ("s50nac", 14), ("s50ndc", 15), ("s25pdc", 16), ("s25pac", 17), ("s25v", 18), ("s25n", 19), ("s60", 20), ("s55", 21), ("s4810", 22), ("s6410", 23), ("z9000", 24), ("m-MXL", 25), ("m-IOA", 26), ("s4820", 27), ("s6000", 28), ("s5000", 29), ("s-FN410S-IOA", 30), ("s-FN410T-IOA", 31), ("s-FN2210S-IOA", 32), ("z9500", 33), ("c9010", 34), ("c1048p", 35), ("s4048on", 36), ("s4810on", 37), ("s6000on", 38), ("s3048on", 39), ("z9100", 40), ("s6100", 41), ("s3148p", 42), ("s3124p", 43), ("s3124f", 44), ("s3124", 45), ("s3148", 46), ("s4048ton", 47), ("s6010", 48))
class DellNetInterfaceType(TextualConvention, Integer32):
description = 'Interface types supported by the Dell Networking OS line cards. '
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
namedValues = NamedValues(("ethernetManagement", 1), ("ethernet100M", 2), ("ethernet1GB", 3), ("ethernet1GBCopper", 4), ("ethernet10GB", 5), ("ethernet10GBCopper", 6), ("sonetOC3OC12", 7), ("sonetOC48OC96", 8), ("sonetOC192", 9), ("ethernet40GB", 10))
class DellNetSystemPortType(TextualConvention, Integer32):
description = 'Port type available in Dell Networking OS series of products.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 99))
namedValues = NamedValues(("portSerial", 1), ("portAux", 2), ("portFastEther", 3), ("port0210E2TV", 4), ("port0210E2TE", 5), ("port2401E24S", 6), ("port2401E24L", 7), ("port12OC12OC3", 8), ("port01OC192", 9), ("port2401E24SEC", 10), ("port2401E24LEC", 11), ("port0210E2TY", 12), ("port0210E2TU", 13), ("port0110EW1YB", 14), ("port0110EW1YC", 15), ("port02S48YC2", 16), ("port0110EX1YB", 17), ("port0110EX1YC", 18), ("port1201F12PB", 19), ("port1201F12PC", 20), ("port0110EX1EB", 21), ("port0110EX1EC", 22), ("port0110EX1YBL", 23), ("port0210EX2YD", 24), ("port0210EX2ED", 25), ("port0210EX2ZD-DEP", 26), ("port0210EW2YD", 27), ("port0110EX1YD", 28), ("port0110EX1ED", 29), ("port0110EX1ZD", 30), ("port0110EW1YD", 31), ("port2401E24PD", 32), ("port0210EX2YD2", 33), ("port0210EX2YE", 34), ("port0110EX1YD2", 35), ("port0110EX1YE", 36), ("port0210EW2YD2", 37), ("port0210EW2YE", 38), ("port0110EW1YE", 39), ("port01OC192SE", 40), ("port2401E24TD", 41), ("port2401E24PE", 42), ("port1201F12PC2", 43), ("port0210EX2ZD", 44), ("port0210EW2YD3", 45), ("port0210EX2ZE", 46), ("port1201F12PE", 47), ("port2401E24PD2", 48), ("port1201E12TD3", 49), ("port0210EX2YD3", 50), ("port0110EX1YD3", 51), ("port1201E12PD3", 52), ("port02S48YE2", 53), ("port0110EX1YE3", 54), ("port1201E12PE3", 55), ("port4801E48PF", 56), ("port2401E24PF3", 57), ("port4801E48TF3", 58), ("port4801E48TF", 59), ("port0410EXW4PF", 60), ("port0210EXW2PF3", 61), ("port9001E90MF", 62), ("port4801E48T1F", 63), ("port1610EXW16PF", 64), ("port0810EXW8PF", 65), ("port0410EXW4PG", 66), ("port4801E48PG", 67), ("port4801E48TG", 68), ("port0210EXW2PG3", 69), ("port2401E24PG3", 70), ("port2401E24TG3", 71), ("port04S48P4G", 72), ("port04S48P4G3", 73), ("port1610EXW16PG", 74), ("port0810EXW8PG3", 75), ("port9001E90MH", 76), ("port1010EXW10SH", 77), ("port1010EXW10SJ", 78), ("port9001E90MJ", 79), ("port5001E50PH", 80), ("port5001E50PJ", 81), ("port1010EXW10PH", 82), ("port1010EXW10PJ", 83), ("port4010EXW40SH", 84), ("port4010EXW40SJ", 85), ("portUnknown", 99))
class DellNetSystemCardType(TextualConvention, Integer32):
description = 'The processor card supported by the Dell Networking OS products .'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 200, 201, 202, 203, 204, 205, 206, 207, 208, 250, 259))
namedValues = NamedValues(("notPresented", 0), ("lc0210E2TV", 1), ("lc0210E2TE", 2), ("lc2401E24S", 3), ("lc2401E24L", 4), ("lc12OC12OC3", 5), ("lc01OC192", 6), ("lcReserve", 7), ("lc2401E24SEC", 8), ("lc2401E24lEc", 9), ("lc0210E2TY", 10), ("lc0210E2TU", 11), ("lc0110EW1YB", 12), ("lc0110EW1YC", 13), ("lc02S48YC2", 14), ("lc0110EX1YB", 15), ("lc0110EX1YC", 16), ("lc1201F12PB", 17), ("lc1201F12PC", 18), ("lc0110EX1EB", 19), ("lc0110EX1EC", 20), ("lc0110EX1YBL", 21), ("lc0210EX2YD", 22), ("lc0210EX2ED", 23), ("lc0210EX2ZDdep", 24), ("lc0210EW2YD", 25), ("lc0110EX1YD", 26), ("lc0110EX1ED", 27), ("lc0110EX1ZD", 28), ("lc0110EW1YD", 29), ("lc2401E24PD", 30), ("lc0210EX2YD2", 31), ("lc0210EX2YE", 32), ("lc0110EX1YD2", 33), ("lc0110EX1YE", 34), ("lc0210EW2YD2", 35), ("lc0210EW2YE", 36), ("lc0110EW1YE", 37), ("lc01OC192SE", 38), ("lc2401E24TD", 39), ("lc2401E24PE", 40), ("lc1201F12PC2", 41), ("lc0210EX2ZD", 42), ("lc0210EW2YD3", 43), ("lc0210EX2ZE", 44), ("lc1201F12PE", 45), ("lc2401E24PD2", 46), ("lc0210EX2ZD2", 47), ("lc1201E12TD3", 48), ("lc0210EX2YD3", 49), ("lc0110EX1YD3", 50), ("lc1201E12PD3", 51), ("lc02S48YE2", 52), ("lc0110EX1YE3", 53), ("lc1201E12PE3", 54), ("lc4801E48PF", 55), ("lc2401E24PF3", 56), ("lc4801E48TF3", 57), ("lc4801E48TF", 58), ("lc0410EXW4PF", 59), ("lc0210EXW2PF3", 60), ("lc9001E90MF", 61), ("lc4801E48T1F", 62), ("lc1610EXW16PF", 63), ("lc0810EXW8PF", 64), ("lc0410EXW4PG", 65), ("lc4801E48PG", 66), ("lc4801E48TG", 67), ("lc0210EXW2PG3", 68), ("lc2401E24PG3", 69), ("lc2401E24TG3", 70), ("lc04S48P4G", 71), ("lc04S48P4G3", 72), ("lc1610EXW16PG", 73), ("lc0810EXW8PG3", 74), ("lc9001E90MH", 75), ("lc1010EXW10SH", 76), ("lc1010EXW10SJ", 77), ("lc9001E90MJ", 78), ("lc5001E50PH", 79), ("lc5001E50PJ", 80), ("lc1010EXW10PH", 81), ("lc1010EXW10PJ", 82), ("lc4010EXW40SH", 83), ("lc4010EXW40SJ", 84), ("z9500LC12", 85), ("z9500LC36", 86), ("z9500LC48", 87), ("c9000LC24X10GCu", 88), ("c9000LC24X10GOptics", 89), ("c9000LC6X40G", 90), ("rpmCard", 200), ("rpmCardEB", 201), ("rpmCardED", 202), ("rpmCardEE", 203), ("rpmCardEE3", 204), ("rpmCardEF", 205), ("rpmCardEF3", 206), ("rpmCardEH", 207), ("supCard", 208), ("sfmCard", 250), ("cardUnknown", 259))
class DellNetCardOperStatus(TextualConvention, Integer32):
description = "The operational status provides further condition of the card. If AdminStatus is changed to 'up', then the valid state is 'ready' - the card is present and ready and operational packets can be passed If AdminStatus is changed to 'down', the states can be as followed: 'cardNotmatch'- the card does not matche what is configured 'cardProblem' - the card detects hardware problems 'diagMode' - the card in the diagnostic mode 'cardAbsent' - the card is not present 'offline' - the card is not used."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("ready", 1), ("cardNotmatch", 2), ("cardProblem", 3), ("diagMode", 4), ("cardAbsent", 5), ("offline", 6))
class DellNetIfType(TextualConvention, Integer32):
description = 'Port type available in Dell Networking OS products.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 99))
namedValues = NamedValues(("portSerial", 1), ("portFastEther", 2), ("portGigEther", 3), ("port10GigEther", 4), ("port40GigEther", 5), ("portFibreChannel", 6), ("portAux", 7), ("portUnknown", 99))
class DellNetCSeriesCardType(TextualConvention, Integer32):
description = 'The processor card supported by the Dell Networking OS C-Series system products .'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 99, 1024, 1026, 1027, 1028, 1280, 1284, 2049, 200))
namedValues = NamedValues(("notPresented", 0), ("cardUnknown", 99), ("lc4802E48TB", 1024), ("lc0410EX4PB", 1026), ("lc4801E48PB", 1027), ("lc4610E46TB", 1028), ("lc4802E48VB", 1280), ("lc4610E46VB", 1284), ("lc0810EX8PB", 2049), ("rpmCard", 200))
class DellNetProcessorModuleType(TextualConvention, Integer32):
description = 'The processor modules supported by the Dell Networking OS card.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("controlProcessor", 1), ("routingProcessor1", 2), ("routingProcessor2", 3), ("linecardProcessor", 4), ("rpmProcessor", 5), ("routingProcessor", 6))
class DellNetSlotState(TextualConvention, Integer32):
description = 'A bit string that represents the status of the slot in a E1200 chassis. Slot# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1 | | | | | Least Significant bit <-----+ | +-----> Most Significant bit The slot numbers starts with the most significant bit. The most significant bit represents slot number 1 and the least significant bit is slot 16. A bit string that represents the status of the slot in a E600 chassis. Slot# 1 2 3 4 5 6 7 8 9 1 1 1 0 1 1 1 0 1 | | | V | Least Significant bit | +-----> Most Significant bit The slot numbers starts with the most significant bit. The most significant bit represents slot number 1 and the least significant bit is slot 9. Each slot occupies a bit. The value 1 indicates slot is in used and 0 indicates slot is empty.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
class DellNetSlotID(TextualConvention, Integer32):
description = 'Dell Networking OS Chassis Slot ID. '
status = 'current'
class DellNetSwDate(DisplayString):
description = 'The date format is MM/DD/YYYY. MM = Month DD = Day YYYY = Year For example, January 24, 2002 would be displayed as: 01/24/2002 '
status = 'current'
class DellNetMfgDate(DisplayString):
description = 'The manufacturing date format is PPWWYYYY PP = Plant #(ie, what building made the board;01= Sanmina Bldg 4,02=Sanmina Bldg 2) WW = Week number (01 = First full week of the year ie, Sunday through Saturday) YYYY = Year For example, 01482001 would have been produced at Samina Bldg 4 during the first week of December, 2001. '
status = 'current'
class PortList(TextualConvention, OctetString):
description = "Each octet within this value specifies a set of eight ports, with the first octet specifying ports 1 through 8, the second octet specifying ports 9 through 16, etc. Within each octet, the most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. Thus, each port of the bridge is represented by a single bit within the value of this object. If that bit has a value of '1' then that port is included in the set of ports; the port is not included if its bit has a value of '0'."
status = 'current'
class DellNetVlanID(TextualConvention, Integer32):
description = 'Dell Networking OS VLAN ID. A value used to index per-VLAN tables: values of 0 and 4095 are not permitted; if the value is between 1 and 4094 inclusive, it represents an IEEE 802.1Q VLAN-ID with global scope within a given bridged domain (see VlanId textual convention). If the value is greater than 4095 then it represents a VLAN with scope local to the particular agent, i.e. one without a global VLAN-ID assigned to it. Such VLANs are outside the scope of IEEE 802.1Q but it is convenient to be able to manage them in the same way using this MIB.'
status = 'current'
class DellNetChassisMode(TextualConvention, Integer32):
description = 'The chassis mode in Dell Networking series of products.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("nonJumbo", 0), ("etherScale", 1), ("mixed", 2), ("teraScale", 3), ("cseries1", 4), ("sseries1", 5), ("exaScale", 6))
class DellNetQueueID(TextualConvention, Integer32):
description = 'Dell Networking OS Queue ID. '
status = 'current'
class DellNetPortPipeID(TextualConvention, Integer32):
description = 'Dell Networking OS PortPipe ID. '
status = 'current'
class DellNetCycloneVersion(TextualConvention, Integer32):
description = 'the Dell Networking OS Cyclone based hardware version'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("onePointFive", 1), ("twoPointZero", 2), ("threePointZero", 3))
class DellNetCamPartitionType(TextualConvention, Integer32):
description = 'The CAM partition supported in the Dell Networking OS line card. The sequecing used here is Layer 2 Ingress CAM range is 1 - 30 Layer 2 Egress CAM range is 31 - 60 Layer 3 Ingress CAM range is 61 - 90 Layer 3 Egress CAM range is 91 - 120 Layer 3 Host abd LPM CAM (BCM specific) range is 121 - 150 '
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 31, 61, 62, 63, 64, 65, 66, 67, 91, 121, 122))
namedValues = NamedValues(("layer2AclIngress", 1), ("layer2AclPvstIngress", 2), ("layer2FibIngress", 3), ("layer2FibEgress", 31), ("layer3AclIngress", 61), ("layer3FibIngress", 62), ("layer3SysFlowIngress", 63), ("layer3TrcListIngress", 64), ("layer3McastFibIngress", 65), ("layer3QosIngress", 66), ("layer3PbrIngress", 67), ("layer3AclEgress", 91), ("layer3ExtHost", 121), ("layer3ExtLPM", 122))
class DellNetHundredthdB(TextualConvention, Integer32):
description = 'This data type represents power levels that are normally expressed in dB. Units are in hundredths of a dB; for example, -7.23 dB will be represented as -723.'
status = 'current'
displayHint = 'd-2'
class DellNetDeviceType(TextualConvention, Integer32):
description = 'The device category running the Dell Networking OS'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("chassis", 1), ("stack", 2), ("rpm", 3), ("supervisor", 4), ("linecard", 5), ("port-extender", 6))
class DellNetPEOperStatus(TextualConvention, Integer32):
description = 'The operational status of the port extender'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("up", 1), ("down", 2))
mibBuilder.exportSymbols("DELL-NETWORKING-TC", dellNetTextualConventions=dellNetTextualConventions, DellNetSwDate=DellNetSwDate, DellNetPEOperStatus=DellNetPEOperStatus, DellNetInterfaceType=DellNetInterfaceType, DellNetPortPipeID=DellNetPortPipeID, DellNetCamPartitionType=DellNetCamPartitionType, DellNetIfType=DellNetIfType, DellNetCardOperStatus=DellNetCardOperStatus, DellNetSlotID=DellNetSlotID, DellNetCSeriesCardType=DellNetCSeriesCardType, PortList=PortList, DellNetVlanID=DellNetVlanID, DellNetDeviceType=DellNetDeviceType, DellNetChassisMode=DellNetChassisMode, PYSNMP_MODULE_ID=dellNetTextualConventions, DellNetCycloneVersion=DellNetCycloneVersion, DellNetMfgDate=DellNetMfgDate, DellNetQueueID=DellNetQueueID, DellNetSlotState=DellNetSlotState, DellNetSystemPortType=DellNetSystemPortType, DellNetHundredthdB=DellNetHundredthdB, DellNetChassisType=DellNetChassisType, DellNetProcessorModuleType=DellNetProcessorModuleType, DellNetSystemCardType=DellNetSystemCardType)
| [
"[email protected]"
] | |
6f47ad10c4d8add20d063805aae912c0a742a686 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/bd_-11162/sdB_bd_-11162_lc.py | b076fadab2c00cc4cf60f263d06b3a4df11907ba | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[13.062792,-10.662778], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_bd_-11162/sdB_bd_-11162_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1e3299112d0d4a422e71d7d55d2a4869b4e74dc6 | 917e376668f325c0452fe05fcf3f6348a6ac4336 | /tests/xla_interpreter_test.py | d3b758aa0cb0e09d3959f3ad74c8e0192d75cc0a | [
"Apache-2.0"
] | permissive | wusixer/jax | 5f8d78a89679db74d0d62806725cc820246d4b4e | 66de981e1dfbe04a41b2c003f171fea7bb92585f | refs/heads/main | 2023-06-15T09:10:45.599555 | 2021-07-06T01:58:11 | 2021-07-06T01:58:11 | 383,305,925 | 0 | 0 | NOASSERTION | 2021-07-06T01:32:55 | 2021-07-06T01:32:55 | null | UTF-8 | Python | false | false | 1,252 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
from jax import test_util as jtu
from jax._src import api
from jax.interpreters import xla
class XlaInterpreterTest(jtu.JaxTestCase):
@unittest.skipIf(not xla._ALLOW_ARG_PRUNING, "Test requires jaxlib 0.1.66")
def test_prune_jit_args(self):
def f(*args):
return args[0]
closed_jaxpr = api.make_jaxpr(f)(*range(10))
pruned_jaxpr, kept_const_idx, kept_var_idx = xla._prune_unused_inputs(
closed_jaxpr.jaxpr)
assert len(pruned_jaxpr.invars) == 1
assert kept_const_idx == set()
assert kept_var_idx == {0}
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| [
"[email protected]"
] | |
acce8e1a21ccf28ffadc38b8002f50cdbcf6987b | afdda9b5185826747814dd82fdf74f809cfa62ef | /Python/tdw/librarian.py | ce2301ec287ecfd7eebbf45a81bb85c00ca3ed8d | [
"BSD-2-Clause"
] | permissive | lijin929/tdw | e011b831c650a383a22e7e16d934c7940416fcd0 | 957cff2b400fbd24e31bbae886c307ecb7a74cdb | refs/heads/master | 2023-08-14T22:35:29.172322 | 2021-09-14T13:52:30 | 2021-09-14T13:52:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,269 | py | import json
from typing import List, Dict, TypeVar, Union, Generic, Optional, Tuple
import pkg_resources
from pathlib import Path
import platform
from secrets import token_hex
class _Record:
"""
Abstract class for a metadata record.
"""
_PLATFORM = platform.system()
def __init__(self, data: Optional[dict] = None):
"""
:param data: JSON data for the record. If None, the record will initialize with default values.
"""
if data is None:
self.name: str = ""
self.urls: Dict[str, str] = {"Windows": "", "Darwin": "", "Linux": ""}
else:
self.name = data["name"]
self.urls: Dict[str, str] = data["urls"]
def get_url(self) -> str:
"""
Returns the URL of the asset bundle for this platform. This is a wrapper for record.urls.
"""
return self.urls[_Record._PLATFORM]
def get_serializable(self) -> dict:
"""
Returns the serializable dictionary of this record.
"""
return self.__dict__
class ModelRecord(_Record):
"""
A record of a model asset bundle.
"""
def __init__(self, data: Optional[dict] = None):
super().__init__(data)
if data is None:
self.wnid: str = ""
self.wcategory: str = ""
self.scale_factor: float = 1
self.do_not_use: bool = False
self.do_not_use_reason: str = ""
self.flex: bool = False
self.substructure: List[dict] = []
self.bounds: Dict[str, Dict[str, float]] = {"back": {"x": 0, "y": 0, "z": 0},
"bottom": {"x": 0, "y": 0, "z": 0},
"center": {"x": 0, "y": 0, "z": 0},
"front": {"x": 0, "y": 0, "z": 0},
"left": {"x": 0, "y": 0, "z": 0},
"right": {"x": 0, "y": 0, "z": 0},
"top": {"x": 0, "y": 0, "z": 0}}
self.canonical_rotation: Dict[str, float] = {"x": 0, "y": 0, "z": 0}
self.physics_quality: float = -1
self.asset_bundle_sizes: Dict[str, int] = {"Windows": -1, "Darwin": -1, "Linux": -1}
self.composite_object = False
else:
self.wnid: str = data["wnid"]
self.wcategory: str = data["wcategory"]
self.scale_factor: float = data["scale_factor"]
self.do_not_use: bool = data["do_not_use"]
self.do_not_use_reason: str = data["do_not_use_reason"]
self.flex: bool = data["flex"]
self.substructure: List[dict] = data["substructure"]
self.bounds: Dict[str, Dict[str, float]] = data["bounds"]
self.canonical_rotation: Dict[str, float] = data["canonical_rotation"]
self.physics_quality: float = data["physics_quality"]
self.asset_bundle_sizes: Dict[str, int] = data["asset_bundle_sizes"]
self.composite_object: bool = data["composite_object"]
class MaterialRecord(_Record):
"""
A record of a visual material asset bundle.
"""
def __init__(self, data: Optional[dict] = None):
super().__init__(data)
if data is None:
self.type: str = "Ceramic"
else:
self.type: str = data["type"]
class SceneRecord(_Record):
"""
A record of a scene asset bundle.
"""
def __init__(self, data: Optional[dict] = None):
super().__init__(data)
if data is None:
self.description: str = ""
self.hdri: bool = False
self.location: str = ""
else:
self.description: str = data["description"]
self.hdri: bool = data["hdri"]
self.location: str = data["location"]
class HDRISkyboxRecord(_Record):
"""
A record of an HDRI skybox asset bundle.
"""
def __init__(self, data: Optional[dict] = None):
super().__init__(data)
if data is None:
self.color_temperature: float = 0
self.sun_elevation: float = 0
self.sun_initial_angle: float = 0
self.sun_intensity: float = 0
self.initial_skybox_rotation: float = 0
self.exposure: float = 0
self.location: str = ""
else:
self.color_temperature: float = data["color_temperature"]
self.sun_elevation: float = data["sun_elevation"]
self.sun_initial_angle: float = data["sun_initial_angle"]
self.sun_intensity: float = data["sun_intensity"]
self.initial_skybox_rotation: float = data["initial_skybox_rotation"]
self.exposure: float = data["exposure"]
self.location: str = data["location"]
class HumanoidAnimationRecord(_Record):
"""
A record for a humanoid animation asset bundle.
"""
def __init__(self, data: Optional[dict] = None):
super().__init__(data)
if data is None:
self.duration: float = 0
self.loop: bool = False
self.framerate: int = 0
else:
self.duration: float = data["duration"]
self.loop: bool = data["loop"]
self.framerate: int = data["framerate"]
def get_num_frames(self) -> int:
"""
Returns the number of frames, given the duration and framerate.
"""
return int(self.duration * self.framerate)
class HumanoidRecord(_Record):
"""
A record for a humanoid asset bundle.
"""
def __init__(self, data: Optional[dict] = None):
super().__init__(data)
class RobotRecord(_Record):
"""
A record for a robot asset bundle.
"""
def __init__(self, data: Optional[dict] = None):
super().__init__(data)
self.source: str = data["source"]
self.immovable: bool = data["immovable"]
self.targets: dict = data["targets"]
T = TypeVar("T", bound=_Record)
class _Librarian(Generic[T]):
"""
Base abstract class for a metadata librarian.
"""
def __init__(self, library: str = ""):
"""
:param library: The absolute path to the library .json file. If empty, a default path in the tdw module will be used.
"""
if library == "":
self.library = pkg_resources.resource_filename(__name__, "metadata_libraries/" + self.get_default_library())
else:
module_path = pkg_resources.resource_filename(__name__, "metadata_libraries/" + library)
if Path(module_path).exists():
self.library = module_path
else:
self.library = library
with open(self.library, "rt") as f:
self.data = json.load(f)
self.description = self.data["description"]
self.records: List[T] = []
for key in self.data["records"]:
record = self._generate_record(self.data["records"][key])
temp_urls = dict()
# De-localize URLs
for p in record.urls:
# Set an absolute path.
absolute = False
for prefix in ["file:///", "http://", "https://"]:
if record.urls[p].startswith(prefix):
temp_urls[p] = record.urls[p]
absolute = True
# De-localize a local path.
if not absolute:
temp_urls[p] = f"file:///{str(Path(self.library).parent.joinpath(record.urls[p]).resolve())}"
temp_urls[p] = temp_urls[p].replace("\\", "/")
record.urls = temp_urls
self.records.append(record)
def get_default_library(self) -> str:
"""
Returns the default library path (which is always the first in the list of `get_library_filenames()`)
"""
return self.get_library_filenames()[0]
@staticmethod
def create_library(description: str, path: str) -> None:
"""
Create a new library JSON file.
:param path: The absolute filepath to the .json records database file.
:param description: A brief description of the library.
"""
path = Path(path)
data = {"description": description,
"records": {}}
path.write_text(json.dumps(data), encoding="utf-8")
print(f"Created new library: {path}")
@staticmethod
def get_library_filenames() -> List[str]:
"""
Returns a list of the filenames of the libraries of this type in the tdw module.
"""
raise Exception()
def get_record(self, name: str) -> Optional[T]:
"""
Returns a record with the specified name. If that record can't be found, returns None.
:param name: The name of the record.
"""
records = [r for r in self.records if r.name == name]
if len(records) == 0:
return None
else:
return records[0]
def search_records(self, search: str) -> List[T]:
"""
Returns a list of records whose names include the search keyword.
:param search: The string to search for in the model name.
"""
return [r for r in self.records if search in r.name]
def add_or_update_record(self, record: T, overwrite: bool, write: bool = True, quiet: bool = True) -> bool:
"""
Add a new record or update an existing record.
:param record: The record.
:param overwrite: If true, overwrite the record if it already exists.
:param write: If true, write the library data to disk (overwriting the existing file).
:param quiet: If true, silently correct the model name if need be.
"""
# Valid the name of the record.
name_ok, name, problems = self.get_valid_record_name(record.name, overwrite)
record.name = name
if not name_ok and not quiet:
print(f"Renaming this record to {name} because:")
for p in problems:
print(f"\t{p}")
added = False
if len([r for r in self.records if r.name == record.name]) > 0:
# If this record exists and we want to overwrite, update the record.
if overwrite:
records_list = [r for r in self.records if r.name != record.name]
records_list.append(record)
added = True
# Add the record.
else:
self.records.append(record)
added = True
# Write to disk.
if added:
if record.name in self.data["records"]:
self.data["records"][record.name] = record.get_serializable()
else:
self.data["records"].update({record.name: record.get_serializable()})
if write:
self.write()
return added
def remove_record(self, record: Union[str, T], write: bool = True) -> bool:
"""
Remove a record. Returns true if the record was removed.
:param record: The record or the name of the record.
:param write: If true, write the library data to disk (overwriting the existing file).
"""
if isinstance(record, str):
record_name = record
else:
record_name = record.name
records_list = [r for r in self.records if r.name != record_name]
removed = len(records_list) < len(self.records)
if removed:
del self.data["records"][record_name]
self.records = records_list
if write:
self.write()
return removed
def write(self, pretty=True) -> None:
"""
Write the data to disk.
:param pretty: Pretty print.
"""
with open(self.library, "wt") as f:
if pretty:
json.dump(self.data, f, sort_keys=True, indent=4)
else:
json.dump(self.data, f)
def get_valid_record_name(self, name: str, overwrite: bool) -> Tuple[bool, str, List[str]]:
"""
Generates a valid record name. Returns: true if the name is good as-is, the new name, and a list of problems with the old name.
:param name: The name of a record we'd like to add.
:param overwrite: If true, raise an exception if the record doesn't exist. Otherwise, overwrite. If False: If the record exists, suggest a new name.
"""
record_names = [r.name for r in self.records]
if overwrite and name not in record_names:
return False, name, [f"Can't override a record named {name} because no such record exists!"]
good_name = name[:]
ok = True
problems: List[str] = []
good_name = good_name.replace(" ", "_")
if good_name != name:
ok = False
problems.append("Name has spaces. They have been replaced with underscores.")
good_name = good_name.lower()
if good_name != name:
ok = False
problems.append("Name has uppercase letters. They are now all lowercase.")
if not overwrite and good_name in record_names:
ok = False
while good_name in record_names:
good_name = good_name + token_hex(2)
problems.append(f"A record named {name} already exists, and we don't want to overwrite it.")
return ok, good_name, problems
def _generate_record(self, data: dict) -> T:
"""
Generate a record of type T from JSON data.
:param data: The record JSON data.
"""
raise Exception("Not defined.")
class ModelLibrarian(_Librarian[ModelRecord]):
"""
Librarian class for model metadata.
"""
def get_model_wnids_and_wcategories(self) -> Dict[str, str]:
"""
Returns a dictionary of all model wnids and categories.
Key=wnid Value=category
"""
wnids: Dict[str, str] = {}
for model in self.records:
if model.wnid in wnids:
if wnids[model.wnid] != model.wcategory:
print(f"WARNING: Model {model.name} wcategory is {model.wcategory} (expected: {wnids[model.wnid]})")
else:
wnids.update({model.wnid: model.wcategory})
return wnids
def get_model_wnids(self) -> List[str]:
"""
Returns a list of all unique wnids in the database, sorted numerically.
"""
return sorted(set([r.wnid for r in self.records]))
def get_all_models_in_wnid(self, wnid: str) -> List[ModelRecord]:
"""
Returns a list of all models with the same wnid.
:param wnid: The WordNet ID.
"""
return [r for r in self.records if r.wnid == wnid]
def get_flex_models(self) -> List[ModelRecord]:
"""
Returns a list of all Flex-compatible models.
"""
return [r for r in self.records if r.flex]
@staticmethod
def get_library_filenames() -> List[str]:
return ["models_core.json", "models_full.json", "models_special.json", "models_flex.json"]
def _generate_record(self, data: dict) -> T:
return ModelRecord(data)
class MaterialLibrarian(_Librarian[MaterialRecord]):
"""
Librarian class for material metadata.
"""
def get_all_materials_of_type(self, material_type: str) -> List[MaterialRecord]:
"""
Returns a list of all material records of a given type.
:param material_type: The type of material.
"""
return [r for r in self.records if r.type == material_type]
def get_material_types(self) -> List[str]:
"""
Returns a list of all types of materials, sorted alphabetically.
"""
return sorted(set([r.type for r in self.records]))
@staticmethod
def get_library_filenames() -> List[str]:
return ["materials_med.json", "materials_low.json", "materials_high.json"]
def _generate_record(self, data: dict) -> T:
return MaterialRecord(data)
class SceneLibrarian(_Librarian[SceneRecord]):
"""
Librarian class for scene metadata.
"""
@staticmethod
def get_library_filenames() -> List[str]:
return ["scenes.json"]
def _generate_record(self, data: dict) -> T:
return SceneRecord(data)
class HDRISkyboxLibrarian(_Librarian[HDRISkyboxRecord]):
"""
Librarian class for HDRI skybox metadata.
"""
@staticmethod
def get_library_filenames() -> List[str]:
return ["hdri_skyboxes.json"]
def _generate_record(self, data: dict) -> T:
return HDRISkyboxRecord(data)
class HumanoidAnimationLibrarian(_Librarian[HumanoidAnimationRecord]):
"""
Librarian class for humanoid animation metadata.
"""
@staticmethod
def get_library_filenames() -> List[str]:
return ["humanoid_animations.json"]
def _generate_record(self, data: dict) -> T:
return HumanoidAnimationRecord(data)
class HumanoidLibrarian(_Librarian[HumanoidRecord]):
"""
Librarian class for humanoid metadata.
"""
@staticmethod
def get_library_filenames() -> List[str]:
return ["humanoids.json"]
def _generate_record(self, data: dict) -> T:
return HumanoidRecord(data)
class RobotLibrarian(_Librarian[RobotRecord]):
"""
Librarian class for robot metadata.
"""
@staticmethod
def get_library_filenames() -> List[str]:
return ["robots.json"]
def _generate_record(self, data: dict) -> T:
return RobotRecord(data)
| [
"[email protected]"
] | |
c35af357be8ae118dde3133aaaff753973cb786b | f756eedd0fd4cee9b369978d1e20287dd579e4da | /InmoovScript/services/7_Inmoov.py | ac1ef25dc0796c3cb41c2579757db642007edf0a | [] | no_license | linuxrodo/inmoov | cf02421443d6976f153a64c898e2c209e32cc246 | fe8391d6d59ccdf6bdf5b382872fdf4bf77f4b09 | refs/heads/master | 2021-01-21T09:06:52.835689 | 2017-02-23T04:07:38 | 2017-02-23T04:07:38 | 82,861,443 | 0 | 0 | null | 2017-02-22T23:05:24 | 2017-02-22T23:05:24 | null | UTF-8 | Python | false | false | 934 | py | # ##############################################################################
# INMOOV SERVICE
# ##############################################################################
# ##############################################################################
# MRL SERVICE CALL
# ##############################################################################
inMoov=i01
#varduinoright = Runtime.start("varduinoright","VirtualArduino")
#varduinoright.connect(MyRightPort)
#varduinoleft = Runtime.start("varduinoleft","VirtualArduino")
#varduinoleft.connect(MyLeftPort)
#Inmoov Left / right arduino connect
if ScriptType=="RightSide" or ScriptType=="Full":
right = Runtime.createAndStart("i01.right", "Arduino")
RightPortIsConnected=CheckArduinos(right,MyRightPort)
if ScriptType=="LeftSide" or ScriptType=="Full":
left = Runtime.createAndStart("i01.left", "Arduino")
LeftPortIsConnected=CheckArduinos(left,MyLeftPort) | [
"[email protected]"
] | |
5d7a771e779f0b24d4bc1ae2bf01ac98e9d0c325 | 9423dd5312d6c05f61ec902a26ff627c6ef58f97 | /Python/functions/get_middle_point.py | 6d5bb3a5c5f3d271d454c9c6da74dc57df5a617c | [] | no_license | NehvedovichVlad/small_tasks | 01c093b07d521da59c559591559d61e81829df0f | 1c4085e3a2f0a4530c82f57b98f0f83b18e68567 | refs/heads/main | 2023-03-17T15:49:04.480092 | 2021-03-11T20:29:05 | 2021-03-11T20:29:05 | 308,935,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | """"
Середина отрезка
Напишите функцию get_middle_point(x1, y1, x2, y2),
которая принимает в качестве аргументов координаты концов отрезка
(x_1; \, y_1)(x 1;y1) и (x_2; \, y_2)(x2;y2)
и возвращает координаты точки являющейся серединой данного отрезка.
"""
# -------------------------------------------------------------------------------------------------
# 1)вариант
def get_middle_point(x1, y1, x2, y2):
return (x1+x2)/2, (y1+y2)/2
x_1, y_1 = int(input()), int(input())
x_2, y_2 = int(input()), int(input())
x, y = get_middle_point(x_1, y_1, x_2, y_2)
print(x, y)
# -------------------------------------------------------------------------------------------------
# 2)вариант
def get_middle_point(x1, y1, x2, y2):
return (x1 + x2) / 2, (y1 + y2) / 2
print(*get_middle_point(int(input()), int(input()), int(input()), int(input())))
| [
"[email protected]"
] | |
cec69182b84e9aa6bff4f48d54f59182d811ddf5 | de847b2e9a5236887fb6a164fedc0e0c86b84e6c | /pythonturorial/workshopprograms/userinput.py | 0b0ce93aae289361bd5e6a95386c281114c27be5 | [] | no_license | raghuprasadks/pythonmicrosoftexam | 9a6bcafcdbc5bb6727278f421bb1a31dc5b7427b | 68dacab8aa98d0ff39f1f36c3ce8e666be3760a0 | refs/heads/master | 2020-09-15T02:51:06.809959 | 2020-02-12T01:18:42 | 2020-02-12T01:18:42 | 223,330,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | name = input("Enter your name")
print(type(name))
print('your name is ',name)
age = int(input("enter your age"))
print('your age is ',age)
nextyear = age +1
print('your age after one year',nextyear)
amount = float(input("Enter the payment made for purchase of fruits"))
print('float conversion',amount)
print("Enter names of your friends")
friends = eval(input("Enter names as a list"))
print('evaluated as list ',type(friends))
print('here comes your friends ',friends)
| [
"[email protected]"
] | |
8e9aecb12e6e5e2f8c0bc687ca323a81ccf17b40 | 4935e2ef7994222178f950319f9f8d3e2adfa543 | /summer/2018_07_26/4sum-ii.py | 93f49d2327c742fa53619df9e5a30f374a233dd2 | [] | no_license | shaheming/leecode | e853b59469b97ca97a5b4ecd80497b3dac3fb10f | a8b59573dc201438ebd5a5ab64e9ac61255a4abd | refs/heads/master | 2021-07-03T03:57:22.718410 | 2019-04-06T18:19:53 | 2019-04-06T18:19:53 | 140,241,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #这个问题本来是一个 O(N^4) 但是通过拆解可以拆解为两个 O(2*N^2) 的问题
class Solution:
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count = 0
dicA, dicB, dicC, dicD = {}, {}, {}, {}
for a in A:
for b in B:
if a + b in dicA:
dicA[a + b] += 1
else:
dicA[a + b] = 1
for c in C:
for d in D:
if -(c + d) in dicA:
count += dicA[-(c + d)]
return count
| [
"[email protected]"
] | |
61acdb8d432dd34f001d2c0e97dfee241beada2b | 0bcc7dba1f5f1738f9b11a259e63edcb39795a41 | /INFO1110/Lab6/sorter.py | c8f145941317de52a6354f0f760cb64eeb835ec3 | [] | no_license | mlumsden001/University-Notes | 3704b0a0e49a24d965aa24658a607a89c1dfa7da | e0040192204360e3bd3df7087738913c7763a331 | refs/heads/master | 2021-08-15T20:19:18.527082 | 2021-03-22T05:41:11 | 2021-03-22T05:41:11 | 246,731,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | import sys
def sort(sys.argv[1]):
| [
"[email protected]"
] | |
4d7505d380777b2beba7bed17181483a5992b5c4 | da9b9f75a693d17102be45b88efc212ca6da4085 | /sdk/appconfiguration/azure-appconfiguration/setup.py | cbab0ebedf6658d7f5da30366ae1070a9eca46c9 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | elraikhm/azure-sdk-for-python | e1f57b2b4d8cc196fb04eb83d81022f50ff63db7 | dcb6fdd18b0d8e0f1d7b34fdf82b27a90ee8eafc | refs/heads/master | 2021-06-21T22:01:37.063647 | 2021-05-21T23:43:56 | 2021-05-21T23:43:56 | 216,855,069 | 0 | 0 | MIT | 2019-10-22T16:05:03 | 2019-10-22T16:05:02 | null | UTF-8 | Python | false | false | 3,251 | py | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import sys
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-appconfiguration"
PACKAGE_PPRINT_NAME = "App Configuration Data"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.md', encoding='utf-8') as f:
history = f.read()
exclude_packages = [
'tests',
'samples',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
]
if sys.version_info < (3, 5, 3):
exclude_packages.extend([
'*.aio',
'*.aio.*'
])
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=exclude_packages),
install_requires=[
"msrest>=0.6.10",
"azure-core<2.0.0,>=1.0.0b5",
],
extras_require={
":python_version<'3.0'": ['azure-nspkg'],
":python_version<'3.4'": ['enum34>=1.0.4'],
":python_version<'3.5'": ['typing'],
"async:python_version>='3.5'": [
'aiohttp>=3.0',
'aiodns>=2.0'
],
}
) | [
"[email protected]"
] | |
32c207f3631eab9b520c22cef2980be18016e080 | 8b7d98c5077d1607568460ce5ae8da801b11293a | /accounts/forms.py | f47b51e9149eed83879485476cefed208ceca865 | [] | no_license | Th0rn-dev/kiteupru | de0e93fd791522433e2ab34efac1e86a0cb0f613 | df240ff50f51b390f7e27ca35841c6482642d97d | refs/heads/master | 2023-05-04T13:44:05.561708 | 2021-05-30T19:01:59 | 2021-05-30T19:45:47 | 372,293,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django import forms
from .models import Profile
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('avatar',) | [
"[email protected]"
] | |
21190bb8e62dd782eafae6f70363d5471f54ebd4 | 39b35326534d6efa8a60344ef59eac3d8cea562f | /crudpj/crudpj/wsgi.py | b639603353b77ea9655595ff087206ea6ebb8995 | [] | no_license | Hyo-gyeong/Django_review | 8635e8311111cab56066c6b87429c7f57c5e42c3 | 8b59d717c0c8c4404230c8eaa42e6074cacdd712 | refs/heads/master | 2021-01-03T08:32:06.706689 | 2020-08-31T04:55:59 | 2020-08-31T04:55:59 | 240,000,924 | 0 | 0 | null | 2020-08-17T19:21:30 | 2020-02-12T11:53:19 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for crudpj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crudpj.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
4a404290eef8c70049ea154977a634238d6797a0 | a72f39b82966cd6e2a3673851433ce7db550429a | /imix/data/loaders/visual_dialog_dataset.py | 1ff1aed7483afe4283ef3a5267e1db67c5410fa9 | [
"Apache-2.0"
] | permissive | linxi1158/iMIX | 85841d6b95e1d99ed421a1ac3667658e49cae6fc | af87a17275f02c94932bb2e29f132a84db812002 | refs/heads/master | 2023-06-09T23:37:46.534031 | 2021-06-30T12:09:42 | 2021-06-30T12:09:42 | 381,608,650 | 0 | 0 | Apache-2.0 | 2021-06-30T07:08:40 | 2021-06-30T07:08:39 | null | UTF-8 | Python | false | false | 10,038 | py | from torch.utils.data import Dataset
from imix.data.reader.visual_dialog_reader import VisDiaReader
from imix.data.infocomp.visual_dialog_infocpler import VisDiaInfoCpler
from imix.data.builder import DATASETS
import torch
import json
from transformers.tokenization_bert import BertTokenizer
from imix.data.reader.feature_reader.image_features_reader import ImageFeaturesH5Reader
from ..utils.data_utils import encode_input, encode_image_input
import os
dataset_root_path = '/home/datasets/mix_data/iMIX/data/datasets/visdial_data/'
@DATASETS.register_module()
class VisDialDataset(Dataset):
def __init__(self, reader, info_cpler, limit_nums=None):
self.reader = VisDiaReader(reader)
self.info_cpler = VisDiaInfoCpler(info_cpler)
self._limit_sample_nums = limit_nums
self._splits = self.reader.splits
def __len__(self):
if self._limit_sample_nums and self._limit_sample_nums > 0:
return min(len(self.reader), self._limit_sample_nums)
return len(self.reader)
def __getitem__(self, idx):
item_feature = self.reader[idx]
item = self.info_cpler.complete_info(item_feature=item_feature, split=self._splits[0])
return item
@DATASETS.register_module()
class VisdialDatasetDense(Dataset):
params = {
'num_train_samples':
0,
'num_val_samples':
0,
'visdial_image_feats':
os.path.join(dataset_root_path, 'features', 'visdial_img_feat.lmdb'),
'visdial_processed_train_dense':
os.path.join(dataset_root_path, 'pre_process_annotations', 'visdial_1.0_train_dense_processed.json'),
'visdial_processed_val':
os.path.join(dataset_root_path, 'pre_process_annotations', 'visdial_1.0_val_processed.json'),
'visdial_processed_train_dense_annotations':
os.path.join(dataset_root_path, 'pre_process_annotations',
'visdial_1.0_train_dense_annotations_processed.json'),
'visdial_processed_val_dense_annotations':
os.path.join(dataset_root_path, 'pre_process_annotations', 'visdial_1.0_val_dense_annotations_processed.json'),
'num_options':
100,
'visdial_tot_rounds':
11,
'overfit':
None,
'max_seq_len':
256,
}
def __init__(self):
"""Initialization."""
params = self.params
self.numDataPoints = {}
num_samples_train = params['num_train_samples']
num_samples_val = params['num_val_samples']
self._image_features_reader = ImageFeaturesH5Reader(params['visdial_image_feats'])
with open(params['visdial_processed_train_dense']) as f:
self.visdial_data_train = json.load(f)
if params['overfit']:
if num_samples_train:
self.numDataPoints['train'] = num_samples_train
else:
self.numDataPoints['train'] = 5
else:
if num_samples_train:
self.numDataPoints['train'] = num_samples_train
else:
self.numDataPoints['train'] = len(self.visdial_data_train['data']['dialogs'])
with open(params['visdial_processed_val']) as f:
self.visdial_data_val = json.load(f)
if params['overfit']:
if num_samples_val:
self.numDataPoints['val'] = num_samples_val
else:
self.numDataPoints['val'] = 5
else:
if num_samples_val:
self.numDataPoints['val'] = num_samples_val
else:
self.numDataPoints['val'] = len(self.visdial_data_val['data']['dialogs'])
self.overfit = params['overfit']
with open(params['visdial_processed_train_dense_annotations']) as f:
self.visdial_data_train_ndcg = json.load(f)
with open(params['visdial_processed_val_dense_annotations']) as f:
self.visdial_data_val_ndcg = json.load(f)
# train val setup
self.numDataPoints['trainval'] = self.numDataPoints['train'] + self.numDataPoints['val']
self.num_options = params['num_options']
self._split = 'train'
self.subsets = ['train', 'val', 'trainval']
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.tokenizer = tokenizer
# fetching token indicecs of [CLS] and [SEP]
tokens = ['[CLS]', '[MASK]', '[SEP]']
indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
self.CLS = indexed_tokens[0]
self.MASK = indexed_tokens[1]
self.SEP = indexed_tokens[2]
self.params = params
self._max_region_num = 37
def __len__(self):
return self.numDataPoints[self._split]
@property
def split(self):
return self._split
@split.setter
def split(self, split):
assert split in self.subsets
self._split = split
def __getitem__(self, index):
def pruneRounds(context, num_rounds):
start_segment = 1
len_context = len(context)
cur_rounds = (len(context) // 2) + 1
l_index = 0
if cur_rounds > num_rounds:
# caption is not part of the final input
l_index = len_context - (2 * num_rounds)
start_segment = 0
return context[l_index:], start_segment
# Combining all the dialog rounds with the [SEP] and [CLS] token
MAX_SEQ_LEN = self.params['max_seq_len']
cur_data = None
cur_dense_annotations = None
if self._split == 'train':
cur_data = self.visdial_data_train['data']
cur_dense_annotations = self.visdial_data_train_ndcg
elif self._split == 'val':
if self.overfit:
cur_data = self.visdial_data_train['data']
cur_dense_annotations = self.visdial_data_train_ndcg
else:
cur_data = self.visdial_data_val['data']
cur_dense_annotations = self.visdial_data_val_ndcg
else:
if index >= self.numDataPoints['train']:
cur_data = self.visdial_data_val
cur_dense_annotations = self.visdial_data_val_ndcg
index -= self.numDataPoints['train']
else:
cur_data = self.visdial_data_train
cur_dense_annotations = self.visdial_data_train_ndcg
# number of options to score on
num_options = self.num_options
assert num_options == 100
dialog = cur_data['dialogs'][index]
cur_questions = cur_data['questions']
cur_answers = cur_data['answers']
img_id = dialog['image_id']
assert img_id == cur_dense_annotations[index]['image_id']
cur_rnd_utterance = [self.tokenizer.encode(dialog['caption'])]
options_all = []
cur_rounds = cur_dense_annotations[index]['round_id']
for rnd, utterance in enumerate(dialog['dialog'][:cur_rounds]):
cur_rnd_utterance.append(self.tokenizer.encode(cur_questions[utterance['question']]))
if rnd != cur_rounds - 1:
cur_rnd_utterance.append(self.tokenizer.encode(cur_answers[utterance['answer']]))
for answer_option in dialog['dialog'][cur_rounds - 1]['answer_options']:
cur_option = cur_rnd_utterance.copy()
cur_option.append(self.tokenizer.encode(cur_answers[answer_option]))
options_all.append(cur_option)
assert len(cur_option) == 2 * cur_rounds + 1
gt_option = dialog['dialog'][cur_rounds - 1]['gt_index']
tokens_all = []
mask_all = []
segments_all = []
sep_indices_all = []
hist_len_all = []
for _, option in enumerate(options_all):
option, start_segment = pruneRounds(option, self.params['visdial_tot_rounds'])
tokens, segments, sep_indices, mask = encode_input(
option, start_segment, self.CLS, self.SEP, self.MASK, max_seq_len=MAX_SEQ_LEN, mask_prob=0)
tokens_all.append(tokens)
mask_all.append(mask)
segments_all.append(segments)
sep_indices_all.append(sep_indices)
hist_len_all.append(torch.LongTensor([len(option) - 1]))
tokens_all = torch.cat(tokens_all, 0)
mask_all = torch.cat(mask_all, 0)
segments_all = torch.cat(segments_all, 0)
sep_indices_all = torch.cat(sep_indices_all, 0)
hist_len_all = torch.cat(hist_len_all, 0)
item = {}
item['tokens'] = tokens_all.unsqueeze(0)
item['segments'] = segments_all.unsqueeze(0)
item['sep_indices'] = sep_indices_all.unsqueeze(0)
item['mask'] = mask_all.unsqueeze(0)
item['hist_len'] = hist_len_all.unsqueeze(0)
item['image_id'] = torch.LongTensor([img_id])
# add image features. Expand them to create batch * num_rounds * num options * num bbox * img feats
features, num_boxes, boxes, _, image_target = self._image_features_reader[img_id]
features, spatials, image_mask, image_target, image_label = encode_image_input(
features, num_boxes, boxes, image_target, max_regions=self._max_region_num, mask_prob=0)
item['image_feat'] = features
item['image_loc'] = spatials
item['image_mask'] = image_mask
item['image_target'] = image_target
item['image_label'] = image_label
# add dense annotation fields
item['gt_relevance_round_id'] = torch.LongTensor([cur_rounds])
item['gt_relevance'] = torch.Tensor(cur_dense_annotations[index]['relevance'])
item['gt_option'] = torch.LongTensor([gt_option])
# add next sentence labels for training with the nsp loss as well
nsp_labels = torch.ones(*tokens_all.unsqueeze(0).shape[:-1])
nsp_labels[:, gt_option] = 0
item['next_sentence_labels'] = nsp_labels.long()
return item
| [
"[email protected]"
] | |
69902164ea9b3ea1f9ce378a4254075c62c0dac7 | bc0938b96b86d1396cb6b403742a9f8dbdb28e4c | /aliyun-python-sdk-alidns/aliyunsdkalidns/request/v20150109/DescribeDomainStatisticsRequest.py | 067b160a177535f3387786359d536dd3e134c0fa | [
"Apache-2.0"
] | permissive | jia-jerry/aliyun-openapi-python-sdk | fb14d825eb0770b874bc123746c2e45efaf64a6d | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | refs/heads/master | 2022-11-16T05:20:03.515145 | 2020-07-10T08:45:41 | 2020-07-10T09:06:32 | 278,590,780 | 0 | 0 | NOASSERTION | 2020-07-10T09:15:19 | 2020-07-10T09:15:19 | null | UTF-8 | Python | false | false | 1,896 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalidns.endpoint import endpoint_data
class DescribeDomainStatisticsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alidns', '2015-01-09', 'DescribeDomainStatistics','alidns')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_StartDate(self):
return self.get_query_params().get('StartDate')
def set_StartDate(self,StartDate):
self.add_query_param('StartDate',StartDate)
def get_EndDate(self):
return self.get_query_params().get('EndDate')
def set_EndDate(self,EndDate):
self.add_query_param('EndDate',EndDate)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang) | [
"[email protected]"
] | |
3d6106b6e7e3d37af803f11255cad2346a387720 | 434a76f2a39b6152e18f25c092e2d3e272bcaa7d | /api/views/blockchains/resources.py | b30b53e5a6ba756d1d935653476963c9e299f4e2 | [
"Apache-2.0"
] | permissive | DaCeige/machinaris | fce98168d0ec288b47c37662079cbb928975badc | 2d3837c8af00bb41162f8be1cbf6eaf1cb6c6fdb | refs/heads/main | 2023-08-24T13:10:22.511119 | 2021-10-07T18:55:25 | 2021-10-07T18:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | import datetime as dt
from flask.views import MethodView
from api import app
from api.extensions.api import Blueprint, SQLCursorPage
from common.extensions.database import db
from common.models import Blockchain
from .schemas import BlockchainSchema, BlockchainQueryArgsSchema
blp = Blueprint(
'Blockchains',
__name__,
url_prefix='/blockchains',
description="Operations on blockchains"
)
@blp.route('/')
class Blockchains(MethodView):
@blp.etag
@blp.arguments(BlockchainQueryArgsSchema, location='query')
@blp.response(200, BlockchainSchema(many=True))
@blp.paginate(SQLCursorPage)
def get(self, args):
return db.session.query(Blockchain).filter_by(**args)
@blp.etag
@blp.arguments(BlockchainSchema)
@blp.response(201, BlockchainSchema)
def post(self, new_item):
item = db.session.query(Blockchain).filter(Blockchain.hostname==new_item['hostname'], \
Blockchain.blockchain==new_item['blockchain']).first()
if item: # upsert
new_item['created_at'] = item.created_at
new_item['updated_at'] = dt.datetime.now()
BlockchainSchema().update(item, new_item)
else: # insert
item = Blockchain(**new_item)
db.session.add(item)
db.session.commit()
return item
@blp.route('/<hostname>/<blockchain>')
class BlockchainsByHostname(MethodView):
@blp.etag
@blp.response(200, BlockchainSchema)
def get(self, hostname):
return db.session.query(Blockchain).get_or_404(hostname)
@blp.etag
@blp.arguments(BlockchainSchema)
@blp.response(200, BlockchainSchema)
def put(self, new_item, hostname, blockchain):
item = db.session.query(Blockchain).get_or_404(hostname)
new_item['hostname'] = item.hostname
new_item['created_at'] = item.created_at
new_item['updated_at'] = dt.datetime.now()
blp.check_etag(item, BlockchainSchema)
BlockchainSchema().update(item, new_item)
db.session.add(item)
db.session.commit()
return item
@blp.etag
@blp.response(204)
def delete(self, hostname):
item = db.session.query(Blockchain).get_or_404(hostname)
blp.check_etag(item, BlockchainSchema)
db.session.delete(item)
db.session.commit() | [
"[email protected]"
] | |
a4179dd3d6f20a183b344f05c1d32a059c61e4a6 | c3eabffad6813a285ea1aa554ff05ef3e798bfa7 | /bubbleshoot/utils.py | d1690f1bba79a87551017310240add1a42ba2277 | [] | no_license | ranjian0/mini-games | 8a96eef730cffccd548cd70dee3349ad31502ba3 | 34bb48456ed6e3fc0d82a4c936e01c48bf0e8f47 | refs/heads/master | 2020-03-19T00:09:24.914583 | 2020-01-08T15:32:46 | 2020-01-08T15:32:46 | 135,455,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,516 | py | import os
import math, random
import pygame as pg
from pygame.sprite import *
# from player import Player
# from enemy import EnemySpawner
TRANSPARENT = (0, 0, 0, 0)
def random_pos(target, dist):
pad = 100
max_ = pg.display.get_surface().get_size()
pos = [random.randint(0, n - pad) for n in max_]
# Ensure the random point is more that dist away from target
if pg.math.Vector2(pos[0] - target[0], pos[1] - target[1]).length() < dist:
return random_pos(target, dist)
else:
return pos
def media_path(fn):
path = os.path.join(os.path.dirname(__file__), "media")
return os.path.join(path, fn)
class Bullet(Sprite):
def __init__(self, pos, angle, color=pg.Color("black")):
Sprite.__init__(self)
size = (5, 5)
self.color = color
self.image = self.make_image(size)
self.rect = self.image.get_rect(center=pos)
self.true_pos = list(self.rect.center)
self.angle = -math.radians(angle - 90)
self.speed = 5
def make_image(self, size):
img = pg.Surface(size).convert_alpha()
img.fill(TRANSPARENT)
rect = img.get_rect()
pg.draw.rect(img, self.color, [0, 0, size[0], size[1]])
return img
def update(self, dt):
self.true_pos[0] += math.cos(self.angle) * self.speed
self.true_pos[1] += math.sin(self.angle) * self.speed
self.rect.topleft = self.true_pos
self.remove()
def remove(self):
screen_rect = pg.display.get_surface().get_rect()
if not self.rect.colliderect(screen_rect):
self.kill()
class DamageBar(Sprite):
def __init__(self, pos, size=(200, 25), color=pg.Color("green")):
Sprite.__init__(self)
self.size = size
self.pos = pos
self.color = color
self.image = self.make_image(size)
self.rect = self.image.get_rect(center=pos)
self.true_pos = list(self.rect.center)
def make_image(self, size, fill_percent=1):
img = pg.Surface(size).convert_alpha()
img.fill(TRANSPARENT)
rect = img.get_rect()
pg.draw.rect(img, pg.Color("black"), rect)
rect2 = rect.inflate(-10, -10).copy()
rect2.width *= fill_percent
pg.draw.rect(img, self.color, rect2)
return img
def update(self, sprite):
health_percent = sprite.health / sprite.max_health
self.image = self.make_image(self.size, health_percent)
self.rect = self.image.get_rect(center=self.rect.center)
class Option:
hovered = False
def __init__(self, text, pos, font):
self.text = text
self.pos = pos
self.font = font
self.set_rect()
self.draw()
def draw(self):
self.set_rend()
screen = pg.display.get_surface()
screen.blit(self.rend, self.rect)
def set_rend(self):
self.rend = self.font.render(self.text, True, self.get_color())
def get_color(self):
if self.hovered:
return (255, 255, 255)
else:
return (100, 100, 100)
def set_rect(self):
self.set_rend()
self.rect = self.rend.get_rect()
self.rect.center = self.pos
class MainMenu:
def __init__(self):
self.font = pg.font.Font(None, 72)
size = pg.display.get_surface().get_size()
off_x = size[0] / 2
off_y = size[1] / 2
self.options = [
Option("PLAY GAME", (off_x, off_y - 80), self.font),
Option("CREDITS", (off_x, off_y), self.font),
Option("EXIT", (off_x, off_y + 80), self.font),
]
# Title image
self.title = pg.image.load(media_path("title.png"))
self.title_rect = self.title.get_rect(center=(off_x, 70))
def draw(self, *args):
# Draw title image
screen = pg.display.get_surface()
screen.blit(self.title, self.title_rect)
# Draw Options
for option in self.options:
if option.rect.collidepoint(pg.mouse.get_pos()):
option.hovered = True
else:
option.hovered = False
option.draw()
def on_mouse(self):
for option in self.options:
if option.rect.collidepoint(pg.mouse.get_pos()):
return option.text
class Credits:
def __init__(self):
size = pg.display.get_surface().get_size()
# Credits Text
self._font = pg.font.Font(None, 30)
self.text = """
ESCAPE SHOOTER
Author
````````
Ian Ichung'wah Karanja
Description
`````````````
This game was created between 1/5/17 and 3/5/17.
The Player is challenged to kill enemies that are
roaming about. Proximity to the enemies triggers an
alert that causes them to chase and shoot at you.
How many enemies can you kill before you die?
Enjoy.
""".lstrip()
# Credits Back Button
self.font = pg.font.Font(None, 72)
pad_x = (size[0] - self.font.size("BACK")[0]) / 2
self.options = [Option("BACK", (100, size[1] - 50), self.font)]
def draw(self, *args):
# Draw Credits Text
screen = pg.display.get_surface()
size = pg.display.get_surface().get_size()
lines = self.text.splitlines()[1:]
for idx, l in enumerate(lines):
# Determine x padding
l_size = self._font.size(l)[0]
off_x = (size[0] - l_size) / 2
screen.blit(
self._font.render(l, True, (232, 122, 49)), (off_x, 10 + (idx * 30))
)
# Draw Back button
for option in self.options:
if option.rect.collidepoint(pg.mouse.get_pos()):
option.hovered = True
else:
option.hovered = False
option.draw()
def on_mouse(self):
for option in self.options:
if option.rect.collidepoint(pg.mouse.get_pos()):
return option.text
class GameOver:
def __init__(self):
self.font = pg.font.Font(None, 72)
self.size = pg.display.get_surface().get_size()
off_x = self.size[0] / 2
off_y = self.size[1] / 2
self.options = [
Option("RESTART", (off_x, self.size[1] - 250), self.font),
Option("MAIN MENU", (off_x, self.size[1] - 150), self.font),
Option("EXIT", (off_x, self.size[1] - 50), self.font),
]
# Title image
path = os.path.join(os.path.dirname(__file__), "media")
file = "title.png"
self.title = pg.image.load(os.path.join(path, file))
self.title_rect = self.title.get_rect(center=(off_x, 70))
# Enemies killed text
self.font = pg.font.Font(None, 72)
def draw(self, *args):
off_x = self.size[0] / 2
off_y = self.size[1] / 2
# Draw title image
screen = pg.display.get_surface()
screen.blit(self.title, self.title_rect)
# Draw Killed text
text = " {} enemies killed !".format(args[0])
self.killed_text = self.font.render(text, True, (230, 0, 0))
self.killed_rect = self.killed_text.get_rect(center=(off_x, off_y - 100))
screen.blit(self.killed_text, self.killed_rect)
for option in self.options:
if option.rect.collidepoint(pg.mouse.get_pos()):
option.hovered = True
else:
option.hovered = False
option.draw()
def on_mouse(self):
for option in self.options:
if option.rect.collidepoint(pg.mouse.get_pos()):
return option.text
class PauseMenu:
def __init__(self):
self.font = pg.font.Font(None, 72)
size = pg.display.get_surface().get_size()
off_x = size[0] / 2
off_y = size[1] / 2
text = "PAUSED"
self.text_surf = self.font.render(text, True, (232, 122, 49))
self.text_rect = self.text_surf.get_rect()
self.text_rect.center = (off_x, off_y)
def draw(self, *args):
screen = pg.display.get_surface()
screen.blit(self.text_surf, self.text_rect)
class MenuSystem:
def __init__(self):
self.active = True
self.active_menu = 0
self.menus = [MainMenu(), Credits(), GameOver(), PauseMenu()]
# Game State
self.quit = False
pg.mixer.music.load(media_path("menu_loop.wav"))
pg.mixer.music.play(-1, 0.0)
def draw(self, *args):
self.menus[self.active_menu].draw(*args)
def on_mouse(self, reset_func):
option = self.menus[self.active_menu].on_mouse()
if option == "PLAY GAME":
self.active = False
elif option == "EXIT":
self.quit = True
elif option == "CREDITS":
self.set_credits()
elif option == "BACK":
self.set_main()
elif option == "RESTART":
reset_func()
pg.mixer.music.play(-1, 0.0)
self.active = False
elif option == "MAIN MENU":
reset_func()
self.set_main()
def set_main(self):
self.active_menu = 0
pg.mixer.music.play(-1, 0.0)
def set_credits(self):
self.active_menu = 1
def set_gameover(self):
self.active_menu = 2
pg.mixer.music.stop()
def set_pause(self):
self.active_menu = 3
| [
"[email protected]"
] | |
eb072ee218d2a1895d7da00df4591fd81018b7c7 | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/tests/unit/virt/xenapi/test_driver.py | f8674f0cec1082ea4a77834f9a8001aa2c43c8e8 | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,308 | py | # Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
from oslo_utils import units
from patron.compute import arch
from patron.tests.unit.virt.xenapi import stubs
from patron.virt import driver
from patron.virt import fake
from patron.virt import xenapi
from patron.virt.xenapi import driver as xenapi_driver
class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Driver operations."""
def _get_driver(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.flags(connection_url='test_url',
connection_password='test_pass', group='xenserver')
return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
def host_stats(self, refresh=True):
return {'host_memory_total': 3 * units.Mi,
'host_memory_free_computed': 2 * units.Mi,
'disk_total': 5 * units.Gi,
'disk_used': 2 * units.Gi,
'disk_allocated': 4 * units.Gi,
'host_hostname': 'somename',
'supported_instances': arch.X86_64,
'host_cpu_info': {'cpu_count': 50},
'cpu_model': {
'vendor': 'GenuineIntel',
'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
'topology': {
'sockets': 1,
'cores': 4,
'threads': 1,
},
'features': [
'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
'cx8', 'apic', 'sep', 'mtrr', 'mca',
'cmov', 'pat', 'clflush', 'acpi', 'mmx',
'fxsr', 'sse', 'sse2', 'ss', 'ht',
'nx', 'constant_tsc', 'nonstop_tsc',
'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
'ept', 'vpid',
],
},
'vcpus_used': 10,
'pci_passthrough_devices': '',
'host_other-config': {'iscsi_iqn': 'someiqn'}}
def test_available_resource(self):
driver = self._get_driver()
driver._session.product_version = (6, 8, 2)
self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
resources = driver.get_available_resource(None)
self.assertEqual(6008002, resources['hypervisor_version'])
self.assertEqual(50, resources['vcpus'])
self.assertEqual(3, resources['memory_mb'])
self.assertEqual(5, resources['local_gb'])
self.assertEqual(10, resources['vcpus_used'])
self.assertEqual(3 - 2, resources['memory_mb_used'])
self.assertEqual(2, resources['local_gb_used'])
self.assertEqual('xen', resources['hypervisor_type'])
self.assertEqual('somename', resources['hypervisor_hostname'])
self.assertEqual(1, resources['disk_available_least'])
def test_overhead(self):
driver = self._get_driver()
instance = {'memory_mb': 30720, 'vcpus': 4}
# expected memory overhead per:
# https://wiki.openstack.org/wiki/XenServer/Overhead
expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
(instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
xenapi_driver.OVERHEAD_BASE)
expected = math.ceil(expected)
overhead = driver.estimate_instance_overhead(instance)
self.assertEqual(expected, overhead['memory_mb'])
def test_set_bootable(self):
driver = self._get_driver()
self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
driver._vmops.set_bootable('inst', True)
self.mox.ReplayAll()
driver.set_bootable('inst', True)
def test_post_interrupted_snapshot_cleanup(self):
driver = self._get_driver()
fake_vmops_cleanup = mock.Mock()
driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
driver.post_interrupted_snapshot_cleanup("context", "instance")
fake_vmops_cleanup.assert_called_once_with("context", "instance")
def test_public_api_signatures(self):
inst = self._get_driver()
self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
def test_get_volume_connector(self):
ip = '123.123.123.123'
driver = self._get_driver()
self.flags(connection_url='http://%s' % ip,
connection_password='test_pass', group='xenserver')
self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
connector = driver.get_volume_connector({'uuid': 'fake'})
self.assertIn('ip', connector)
self.assertEqual(connector['ip'], ip)
self.assertIn('initiator', connector)
self.assertEqual(connector['initiator'], 'someiqn')
def test_get_block_storage_ip(self):
my_ip = '123.123.123.123'
connection_ip = '124.124.124.124'
driver = self._get_driver()
self.flags(connection_url='http://%s' % connection_ip,
group='xenserver')
self.flags(my_ip=my_ip, my_block_storage_ip=my_ip)
ip = driver._get_block_storage_ip()
self.assertEqual(connection_ip, ip)
def test_get_block_storage_ip_conf(self):
driver = self._get_driver()
my_ip = '123.123.123.123'
my_block_storage_ip = '124.124.124.124'
self.flags(my_ip=my_ip, my_block_storage_ip=my_block_storage_ip)
ip = driver._get_block_storage_ip()
self.assertEqual(my_block_storage_ip, ip)
| [
"[email protected]"
] | |
0c31c2c12ba0cee2fca07eaa29b494befb80343a | 1626e16760c9c5b5dc9bd7c345871c716d5ffd99 | /Problems/0001_0099/0037_Sudoku_Solver/Project_Python3/Solution1.py | 55219e728189a2d7038f6a589b53cbfbcce69186 | [] | no_license | NobuyukiInoue/LeetCode | 94ddb19e63cb8d0775cdc13f311fe90c87a1d718 | 3f0ffd519404165fd1a735441b212c801fd1ad1e | refs/heads/master | 2023-09-01T07:38:50.939942 | 2023-08-23T09:51:17 | 2023-08-23T09:51:17 | 158,100,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | class Solution:
# def solveSudoku(self, board: List[List[str]]) -> None:
def solveSudoku(self, board):
"""
Do not return anything, modify board in-place instead.
"""
from collections import defaultdict
nums = [str(i) for i in range(1, 10)]
rows, cols, cells, empty = defaultdict(set), defaultdict(set), defaultdict(set), set()
for i in range(9):
for j in range(9):
if board[i][j] == '.':
empty.add((i, j))
else:
rows[i].add(board[i][j])
cols[j].add(board[i][j])
cells[i//3, j//3].add(board[i][j])
def fill():
i, j = max(empty, key=lambda x: len(rows[x[0]]) + len(cols[x[1]]) + len(cells[x[0]//3, x[1]//3]))
empty.remove((i, j))
for num in nums:
if not (num in rows[i] or num in cols[j] or num in cells[i//3, j//3]):
board[i][j] = num; rows[i].add(num); cols[j].add(num); cells[i//3, j//3].add(num)
if not empty: return True
if fill(): return True
board[i][j] = '.'; rows[i].remove(num); cols[j].remove(num); cells[i//3, j//3].remove(num)
empty.add((i, j))
return False
if not empty: return
_ = fill()
| [
"[email protected]"
] | |
9e147454ebbe583aae958e4c95cc4a87cd3a44ab | 5f9695616cce1c03013ae9a5e823ad686bf33b6e | /tests/test_unet2.py | 8d5f6867f0a173f479a72b89d2328f66e9994c1f | [
"MIT"
] | permissive | caiyunapp/leibniz | c9567685cafbc618d22487e408a27ba21cc8633e | 40bb6f088c5325701ca53506d7a66eb0a9ef4fef | refs/heads/master | 2023-04-16T18:31:27.194818 | 2021-09-13T15:54:57 | 2021-09-13T15:54:57 | 208,940,378 | 16 | 5 | null | null | null | null | UTF-8 | Python | false | false | 11,603 | py | # -*- coding: utf-8 -*-
import unittest
import torch as th
from leibniz.nn.net import resunet2
from leibniz.nn.layer.hyperbolic import HyperBasic
from leibniz.nn.layer.hyperbolic import HyperBottleneck
from leibniz.nn.layer.senet import SEBasicBlock, SEBottleneck
from leibniz.nn.layer.hyperbolic2 import HyperBasic as HyperBasic2, HyperBottleneck as HyperBottleneck2
class TestUnet(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test1D(self):
net = resunet2(1, 1, spatial=(32,))
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance')
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer')
net(th.rand(1, 1, 16))
def test2D(self):
resunet2(1, 1, spatial=(16, 16))
resunet2(1, 1, spatial=(16, 32))
resunet2(1, 1, spatial=(32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]])
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance')
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer')
net(th.rand(1, 1, 32, 16))
def test3D(self):
resunet2(1, 1, spatial=(16, 16, 16))
resunet2(1, 1, spatial=(32, 16, 16))
resunet2(1, 1, spatial=(16, 32, 16))
resunet2(1, 1, spatial=(16, 16, 32))
resunet2(1, 1, spatial=(11, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]])
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance')
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer')
net(th.rand(1, 1, 4, 16, 32))
def testHyp1D(self):
net = resunet2(1, 1, spatial=(32,), block=HyperBasic)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBasic)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBasic)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), block=HyperBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBottleneck)
net(th.rand(1, 1, 16))
def testHyp2D(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
def testHyp3D(self):
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBasic)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBasic)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBasic)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBottleneck)
net(th.rand(1, 1, 4, 16, 32))
def testSE1D(self):
net = resunet2(1, 1, spatial=(32,), block=SEBasicBlock)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=SEBasicBlock)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=SEBasicBlock)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), block=SEBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=SEBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=SEBottleneck)
net(th.rand(1, 1, 16))
def testSE2D(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=SEBasicBlock)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=SEBasicBlock)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=SEBasicBlock)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=SEBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=SEBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=SEBottleneck)
net(th.rand(1, 1, 32, 16))
def testSE3D(self):
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=SEBasicBlock)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=SEBasicBlock)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=SEBasicBlock)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=SEBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=SEBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=SEBottleneck)
net(th.rand(1, 1, 4, 16, 32))
def testHyp2DGroupNorm(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
def testHyp1D2(self):
net = resunet2(1, 1, spatial=(32,), block=HyperBasic2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBasic2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBasic2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), block=HyperBottleneck2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBottleneck2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBottleneck2)
net(th.rand(1, 1, 16))
def testHyp2D2(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBasic2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBasic2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBasic2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBottleneck2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBottleneck2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBottleneck2)
net(th.rand(1, 1, 32, 16))
def testHyp3D2(self):
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBasic2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBasic2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBasic2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBottleneck2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBottleneck2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBottleneck2)
net(th.rand(1, 1, 4, 16, 32))
| [
"[email protected]"
] | |
dc1572244f1304493d64d667155fcbbc94bf2c68 | 30f8afce1ba484183d8e1e14aae76cabb2d92354 | /pythonNet/day2/server_udp.py | 1d475122c9fb66755a8bb12c1c143cd7db4d6ed6 | [] | no_license | brooot/Python_Base_Codes | d83e8c3b8a37b86672412c812fdb0d47deb67836 | a864685e160b5df4162a6f9fb910627eda702aaf | refs/heads/master | 2023-04-10T20:08:39.161289 | 2021-03-25T12:59:23 | 2021-03-25T12:59:23 | 200,570,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | #!/usr/bin/env python3
from socket import *
# 创建套接字对象
sockfd = socket(AF_INET, SOCK_DGRAM)
# 绑定地址
IP = '0.0.0.0'
PORT = 8888
ADDR = (IP, PORT)
sockfd.bind(ADDR)
while True:
# 接受数据(与tcp不同)
data, addr = sockfd.recvfrom(1024)
message = "已收到来自%s的数据:%s" % (addr, data.decode())
print(message)
# 发送数据
send_message = "已经收到您的数据。".encode()
sockfd.sendto(send_message, addr)
sockfd.close() | [
"[email protected]"
] | |
1818810ee229cd68db13a66efefecbe5872edcc2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_35/216.py | d0006fc021592ec292be67dc3cf4606ceec3d5d5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | import string
class GraphNode(object):
def __init__(self, x_pos, y_pos):
self.x_pos = x_pos
self.y_pos = y_pos
self.flows_to = None
self.flows_from = []
self.label = None
def set_flows_to(self, other):
assert self.flows_to != other
self.flows_to = other
other.flows_from.append(self)
def label_node(node, label):
if node.label is None:
node.label = label
if node.flows_to:
label_node(node.flows_to, label)
for from_node in node.flows_from:
label_node(from_node, label)
else:
if node.label != label:
print "Relabeling of node"
adsafa
def label_nodes(h, w, node_map):
current_label = 0
for i in xrange(h):
for j in xrange(w):
label = string.lowercase[current_label]
node = node_map[i][j]
if node.label is None:
label_node(node, label)
current_label += 1
def flow_water(w,h, height_map, node_map):
for i in xrange(h):
for j in xrange(w):
lowest = height_map[i][j]
flow_to = None
if i - 1 >= 0:
if height_map[i-1][j] < lowest:
lowest = height_map[i-1][j]
flow_to = node_map[i-1][j]
if j - 1 >= 0:
if height_map[i][j-1] < lowest:
lowest = height_map[i][j-1]
flow_to = node_map[i][j-1]
if j + 1 < w:
if height_map[i][j+1] < lowest:
lowest = height_map[i][j+1]
flow_to = node_map[i][j+1]
if i + 1 < h:
if height_map[i+1][j] < lowest:
lowest = height_map[i+1][j]
flow_to = node_map[i+1][j]
if flow_to is not None:
node_map[i][j].set_flows_to(flow_to)
def main():
number_of_cases = int(raw_input())
for case_number in range(1, number_of_cases+1):
h,w = map(int, raw_input().split())
print 'Case #%d:' % (case_number,)
height_map = []
node_map = []
for i in xrange(h):
height_map.append(raw_input().split())
line = []
for j in xrange(w):
line.append(GraphNode(i,j))
node_map.append(line)
flow_water(w, h, height_map, node_map)
label_nodes(h, w, node_map)
for node_line in node_map:
for node in node_line:
print node.label,
print
main()
#w, h = 3,3
#height_map = []
#node_map = []
#height_map.append([9,6,3])
#height_map.append([5,9,6])
#height_map.append([3,5,9])
#for i in xrange(h):
#line = []
#for j in xrange(w):
#line.append(GraphNode(i,j))
#node_map.append(line)
#flow_water(w, h, height_map, node_map)
#label_nodes(h, w, node_map)
#for node_line in node_map:
#for node in node_line:
#print node.label,
#print
##if node.flows_to:
##print node.x_pos, node.y_pos, node.flows_to.x_pos, node.flows_to.y_pos, node.label
##else:
##print node.x_pos, node.y_pos, -1, -1, node.label | [
"[email protected]"
] | |
661caf7b460c7daa1b1dcd64f2926900fa1374e5 | 2286b880df34e1bfabe79b3605de287040404560 | /02-02/todolist/task/urls.py | 97bdb244e32e547aaa634f5ef9fd3c9aa9311fa6 | [] | no_license | iklimah27/praxis-academy-2 | e5d8b08807980d6fd8ff6ab73caa6ea18083c7f8 | 925853b520c9a8d7a87d8980d7fedfa604d3b4c8 | refs/heads/master | 2022-12-25T01:54:45.572190 | 2020-10-15T07:22:06 | 2020-10-15T07:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from django.contrib import admin
from django.urls import path
from django.shortcuts import render
from . import views
urlpatterns = [
path('', views.index),
path('<id>/', views.detail),
path('<id>/delete/', views.delete),
]
| [
"[email protected]"
] | |
1fe2656260edd35919c9745fc47bafc67970c346 | c9c5463996bf9e2adcd4918857382121b0a5aa56 | /leetcode/堆/重构字符串.py | 0147b66b14e19194532f2ddae5788e111bc1a915 | [] | no_license | Da1anna/Data-Structed-and-Algorithm_python | fdf370c355248081990c57c1c8eb5e05c4781e2b | cce067ef4374128924018b00c5ea77d2e869a834 | refs/heads/master | 2022-12-27T13:24:36.084657 | 2020-10-13T02:39:39 | 2020-10-13T02:39:39 | 174,938,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,278 | py | '''
给定一个字符串S,检查是否能重新排布其中的字母,使得两相邻的字符不同。
若可行,输出任意可行的结果。若不可行,返回空字符串。
示例 1:
输入: S = "aab"
输出: "aba"
示例 2:
输入: S = "aaab"
输出: ""
注意:
S 只包含小写字母并且长度在[1, 500]区间内。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/reorganize-string
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
'''
思路1:双指针交换法,遍历数组,当遇到一个字母与它之前一个不同时,从当前位置开始寻找一个可交换的字母,直到遍历完。
这个思路对 ‘baaba’就不行了,因为它无法将第一个b交换到后面
思路2:将每一类字母及其个数组合成一个元组,加入堆中,每次弹出两个不同的个数最多的字母,更新其个数,
重复直到弹完
'''
import heapq as hp
class Solution:
#双指针交换
def reorganizeString_demo(self, S: str) -> str:
lst = list(S)
for i in range(1, len(lst)):
if lst[i] == lst[i - 1]:
j = i+1
while j < len(lst) and lst[j] == lst[i]:
j += 1
if j < len(lst):
lst[i], lst[j] = lst[j], lst[i]
else:
return ''
return ''.join(lst)
#堆的巧用:一次性弹出两个元素
def reorganizeString(self, S: str) -> str:
#特判
if len(S) == 1:
return S
heap = [(-S.count(x),x) for x in set(S)]
for cnt,x in heap:
#这里的判断需要考虑len的奇偶
if -cnt >= (len(S)+1)//2 + 1:
return ''
hp.heapify(heap)
res = ''
while len(heap) >= 2:
cnt1, c1 = hp.heappop(heap)
cnt2, c2 = hp.heappop(heap)
res += c1 + c2
if cnt1 + 1:
hp.heappush(heap,(cnt1+1,c1))
if cnt2 + 1:
hp.heappush(heap,(cnt2+1,c2))
return res+heap[0][1] if heap else res
#测试
S = 'aaab'
res = Solution().reorganizeString(S)
print(res) | [
"[email protected]"
] | |
58f225e91c9707ccec4037ee3789c38ff19785e9 | 799a0af9c05deabe5d5250a10e480ec15ae0216e | /Xpath_test/xpath_test_10.py | 3c4e2e550651ef49c998f95a34ee15717ae8ac84 | [
"MIT"
] | permissive | waws520waws/waws_spider | 9b2be28834c08166463fe265e0f5c37a874369c8 | c6a5988121f32619a5c5134c09fdfd556c696fe7 | refs/heads/master | 2021-03-20T06:01:22.041937 | 2020-03-28T02:49:16 | 2020-03-28T02:49:16 | 247,183,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from lxml import etree
"""
contains的使用:应用于一个标签的属性有多个值的情况,如果我们还是用之前的相等的模式,是匹配不到值的
"""
text = '''
<li class="li li-first"><a href="link.html">first item</a></li>
'''
html = etree.HTML(text)
result = html.xpath('//li[contains(@class, "li")]/a/text()')
print(result) | [
"[email protected]"
] | |
f7c6dff56a5dbfbd57c51b742a1f32e141403c38 | da2583af7a14f04aed029a79a79224547de4c1f2 | /rl/policy/gp_linear_mean.py | ba4a963f759f350f923730c7a4ecbcfa39d55142 | [] | no_license | yun-long/rl_prototype | 4b0af8b817ad1c8bc30a46d7fa2e8f5cd37f7ea1 | 0a86a097d58ce299da90ea346e074f20fe167a5d | refs/heads/master | 2020-04-23T15:37:49.498870 | 2019-02-18T11:28:21 | 2019-02-18T11:28:21 | 171,271,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | """
Gaussin policy, linear mean, constant variance
Reference: Jan Peters, A Survey on policy search for robotics
"""
import numpy as np
import time
from rl.policy.base import GaussianPolicy
class GPLinearMean(GaussianPolicy):
def __init__(self, env, featurizer):
#
self.env = env
#
self.num_features = featurizer.num_features
self.num_actions = env.action_space.shape[0]
self.featurizer = featurizer
#
self.Mu_theta = np.random.randn(self.num_features, self.num_actions) / np.sqrt(self.num_features)
self.Sigma_action = np.eye(self.num_actions) * 1e1 # for exploration in parameter space
super().__init__()
def predict_action(self, state):
"""
Exploration in action_space, used for Step-based usually.
:param state:
:return:
"""
featurized_state = self.featurizer.transform(state).T
Mu_action = np.dot(self.Mu_theta.T, featurized_state).reshape(self.num_actions)
try:
action = np.random.multivariate_normal(Mu_action, self.Sigma_action)
except:
raise ValueError
return action
def update_pg(self, alpha_coeff, theta_samples, advantanges):
pass
def update_wml(self, Weights, Phi, A):
T = Phi.shape[0]
phi = Phi.reshape((T, self.num_features))
Q = Weights.reshape(T)
Q = np.diag(Q)
A = A.reshape((T, self.num_actions))
theta_tmp1 = np.linalg.inv(np.dot(phi.T, np.dot(Q, phi)))
theta_tmp2 = np.dot(phi.T, np.dot(Q, A))
self.Mu_theta = np.dot(theta_tmp1, theta_tmp2).reshape(self.Mu_theta.shape)
#
Z = (np.sum(Weights)**2 - np.sum(Weights**2)) / np.sum(Weights)
nume_sum = 0
for i in range(len(Weights)):
tmp = np.outer((A[i] - np.dot(self.Mu_theta.T, phi[i, :])), (A[i] - np.dot(self.Mu_theta.T, phi[i, :])))
tmp = Weights[i] * tmp
nume_sum += tmp
self.Sigma_action = nume_sum / Z
def optimal_policy_demo(self, num_demos):
for i_demo in range(num_demos):
print("Optimal Policy Demo : ", i_demo)
state = self.env.reset()
while True:
action = self.predict_action(state)
next_state, rewards, done, _ = self.env.step(action)
state = next_state
self.env.render()
if done:
time.sleep(1)
break
self.env.render(close=True)
| [
"[email protected]"
] | |
24007ef5ef566f228a7667133ecccce9e2ca71b6 | 9be143a314f58bad3ca607e8c322415e6d05a30f | /venv/Scripts/pip3-script.py | 5c0139f6223e17f6c9f55c676299039aed715457 | [] | no_license | zibb03/Face-Emotion-Recognition | baec3b7d57636642641e52afd73f1ef5436a51d6 | fd5b04fc34fc8cfa9415ae7ab5fa85316c3be6d4 | refs/heads/main | 2023-06-18T15:49:40.905788 | 2021-07-19T15:05:00 | 2021-07-19T15:05:00 | 355,542,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!C:\Users\user\PycharmProjects\OpenCV\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
2949ad30d2c1f779dd0f7906f17943d31c121fb1 | eac22714038e840028cc5abb72bc750004626ebb | /mct_camera_tools/nodes/image_proc_master.py | 3316d09e4957ac3d0f6500030feeee1cccdedf4f | [
"Apache-2.0"
] | permissive | iorodeo/mct | 79b19f6dab9f6567452df7274d67245bf64b1801 | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | refs/heads/master | 2022-11-11T18:03:18.178182 | 2014-08-20T19:21:27 | 2014-08-20T19:21:27 | 273,790,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,780 | py | #!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('mct_camera_tools')
import rospy
import os
import os.path
import tempfile
import subprocess
from mct_xml_tools import launch
# Services
from mct_msg_and_srv.srv import CommandString
from mct_msg_and_srv.srv import CommandStringResponse
class Image_Proc_Master(object):
"""
Image proc master node. Provides service which launches/kills image_proc nodes for
every camera with a calibratoin.
"""
def __init__(self):
self.tmp_dir = tempfile.gettempdir()
self.launch_file = os.path.join(self.tmp_dir,'image_proc.launch')
self.image_proc_popen = None
rospy.on_shutdown(self.clean_up)
rospy.init_node('image_proc_master')
self.camera_srv = rospy.Service(
'image_proc_master',
CommandString,
self.handle_image_proc_srv,
)
def handle_image_proc_srv(self,req):
"""
Handles requests to lauch/kill the image proc nodes.
"""
command = req.command.lower()
response = True
message = ''
if command == 'start':
if self.image_proc_popen is None:
self.launch_image_proc_nodes()
else:
response = False
message = 'image proc nodes already running'
elif command == 'stop':
if self.image_proc_popen is not None:
self.kill_image_proc_nodes()
else:
response = False
message = 'image proc nodes not running'
else:
response = False
message = 'uknown command: {0}'.format(command)
return CommandStringResponse(response,message)
def launch_image_proc_nodes(self):
"""
Launches the image_proc nodes.
"""
if self.image_proc_popen is None:
launch.create_image_proc_launch(self.launch_file)
self.image_proc_popen = subprocess.Popen(['roslaunch',self.launch_file])
def kill_image_proc_nodes(self):
"""
Kills the image_proc nodes.
"""
if self.image_proc_popen is not None:
self.image_proc_popen.send_signal(subprocess.signal.SIGINT)
self.image_proc_popen = None
try:
os.remove(self.launch_file)
except OSError, e:
rospy.logwarn('Error removing image_proc launch file: {0}'.format(str(e)))
def clean_up(self):
self.kill_image_proc_nodes()
def run(self):
rospy.spin()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
node = Image_Proc_Master()
node.run()
| [
"[email protected]"
] | |
dd5617275d2a87e52a380d2ccfcdf4777e0566ba | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/common/graph/checks_infra/debug.py | 26b247b24b08837b95bd15668c25aedf4d45d7c6 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 6,589 | py | from __future__ import annotations
import json
import logging
from collections.abc import Iterable
from typing import Any, TYPE_CHECKING
import yaml
from termcolor import colored
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.common.resource_code_logger_filter import add_resource_code_filter_to_logger
from checkov.common.util.env_vars_config import env_vars_config
if TYPE_CHECKING:
from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
logger = logging.getLogger(__name__)
add_resource_code_filter_to_logger(logger)
def graph_check(check_id: str, check_name: str) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
print(f'\nEvaluating graph policy: "{check_id}" - "{check_name}"')
def resource_types(resource_types: Iterable[str], resource_count: int, operator: str) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
resource_types_str = '", "'.join(resource_types)
print(
f'\nFound {resource_count} resources with resource types: "{resource_types_str}" to check against operator: "{operator}"'
)
def attribute_block(
resource_types: Iterable[str],
attribute: str | None,
operator: str,
value: str | list[str] | None,
resource: dict[str, Any],
status: str,
) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
attribute_block_conf = _create_attribute_block(
resource_types=resource_types, attribute=attribute, operator=operator, value=value
)
color = "green" if status == "passed" else "red"
print("\nEvaluated block:\n")
print(colored(yaml.dump([attribute_block_conf], sort_keys=False), "blue"))
print("and got:")
print(colored(f'\nResource "{resource[CustomAttributes.ID]}" {status}:', color))
print(colored(json.dumps(resource[CustomAttributes.CONFIG], indent=2), "yellow"))
def connection_block(
resource_types: Iterable[str],
connected_resource_types: Iterable[str],
operator: str,
passed_resources: list[dict[str, Any]],
failed_resources: list[dict[str, Any]],
) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
connection_block_conf = _create_connection_block(
resource_types=resource_types,
connected_resource_types=connected_resource_types,
operator=operator,
)
passed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in passed_resources)
failed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in failed_resources)
print("\nEvaluated blocks:\n")
print(colored(yaml.dump([connection_block_conf], sort_keys=False), "blue"))
print("and got:\n")
print(colored(f'Passed resources: "{passed_resources_str}"', "green"))
print(colored(f'Failed resources: "{failed_resources_str}"', "red"))
def complex_connection_block(
solvers: list[BaseSolver],
operator: str,
passed_resources: list[dict[str, Any]],
failed_resources: list[dict[str, Any]],
) -> None:
if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
return
# to prevent circular dependencies
from checkov.common.checks_infra.solvers.attribute_solvers.base_attribute_solver import BaseAttributeSolver
from checkov.common.checks_infra.solvers.complex_solvers.base_complex_solver import BaseComplexSolver
from checkov.common.checks_infra.solvers.connections_solvers.base_connection_solver import BaseConnectionSolver
from checkov.common.checks_infra.solvers.connections_solvers.complex_connection_solver import (
ComplexConnectionSolver,
)
from checkov.common.checks_infra.solvers.filter_solvers.base_filter_solver import BaseFilterSolver
complex_connection_block = []
for solver in solvers:
if isinstance(solver, BaseAttributeSolver):
block = _create_attribute_block(
resource_types=solver.resource_types,
attribute=solver.attribute,
operator=solver.operator,
value=solver.value,
)
elif isinstance(solver, BaseFilterSolver):
block = _create_filter_block(attribute=solver.attribute, operator=solver.operator, value=solver.value)
elif isinstance(solver, (ComplexConnectionSolver, BaseComplexSolver)):
# ComplexConnectionSolver check needs to be before BaseConnectionSolver, because it is a subclass
block = {solver.operator: ["..." for _ in solver.solvers]}
elif isinstance(solver, BaseConnectionSolver):
block = _create_connection_block(
resource_types=solver.resource_types,
connected_resource_types=solver.connected_resources_types,
operator=solver.operator,
)
else:
logger.info(f"Unsupported solver type {type(solver)} found")
continue
complex_connection_block.append(block)
passed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in passed_resources)
failed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in failed_resources)
print("\nEvaluated blocks:\n")
print(colored(yaml.dump([{operator: complex_connection_block}], sort_keys=False), "blue"))
print("and got:\n")
print(colored(f'Passed resources: "{passed_resources_str}"', "green"))
print(colored(f'Failed resources: "{failed_resources_str}"', "red"))
def _create_attribute_block(
resource_types: Iterable[str], attribute: str | None, operator: str, value: str | list[str] | None
) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "attribute",
"resource_types": resource_types,
"attribute": attribute,
"operator": operator,
}
if value is not None:
attribute_block_conf["value"] = value
return attribute_block_conf
def _create_connection_block(
resource_types: Iterable[str], connected_resource_types: Iterable[str], operator: str
) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "connection",
"resource_types": resource_types,
"connected_resource_types": connected_resource_types,
"operator": operator,
}
return attribute_block_conf
def _create_filter_block(attribute: str | None, operator: str, value: str | list[str]) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "filter",
"attribute": attribute,
"operator": operator,
"value": value,
}
return attribute_block_conf
| [
"[email protected]"
] | |
b9c56ac1d31b2218826dbd63b673f4c3cff2e16a | a2f78983557c1ead7b2a7c3e720d4719099878b9 | /python/ray/experimental/sgd/tf/tf_runner.py | 384136ba79630ef2660e8ee46da3cf60f3455ccf | [
"Apache-2.0",
"MIT"
] | permissive | Senmumu/ray | 3fc914a0a5d9da8fcaa3411bc04be7fba3ce6bbd | 130b8f21da4fb5383b079493faaea5d81065b772 | refs/heads/master | 2020-07-18T12:08:51.862689 | 2019-09-03T22:36:25 | 2019-09-03T22:36:25 | 206,242,928 | 1 | 0 | Apache-2.0 | 2019-09-04T05:59:44 | 2019-09-04T05:59:44 | null | UTF-8 | Python | false | false | 5,250 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import json
import os
import numpy as np
import ray
import ray.services
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
def _try_import_strategy():
"""Late import for Tesnorflow"""
from tensorflow.distribute.experimental import MultiWorkerMirroredStrategy
return MultiWorkerMirroredStrategy
class TFRunner(object):
"""Manages a TensorFlow model for training."""
def __init__(self, model_creator, data_creator, config=None,
verbose=False):
"""Initializes the runner.
Args:
model_creator (dict -> Model): see tf_trainer.py.
data_creator (dict -> tf.Dataset, tf.Dataset): see tf_trainer.py.
config (dict): see tf_trainer.py.
verbose (bool): Outputs training data if true.
"""
self.model_creator = model_creator
self.data_creator = data_creator
self.config = {} if config is None else config
self.epoch = 0
self.verbose = verbose
def setup(self):
"""Initializes the model."""
logger.debug("Creating dataset")
self.train_dataset, self.test_dataset = self.data_creator(self.config)
logger.debug("Creating model")
self.model = self.model_creator(self.config)
def setup_distributed(self, urls, world_rank, world_size):
"""Sets up TensorFLow distributed environment and initializes the model.
Args:
urls (str): the URLs that each node uses to connect.
world_rank (int): the index of the runner.
world_size (int): the total number of runners.
"""
assert len(urls) == world_size
tf_config = {
"cluster": {
"worker": urls
},
"task": {
"index": world_rank,
"type": "worker"
}
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
MultiWorkerMirroredStrategy = _try_import_strategy()
self.strategy = MultiWorkerMirroredStrategy()
self.train_dataset, self.test_dataset = self.data_creator(self.config)
logger.debug("Creating model with MultiWorkerMirroredStrategy")
with self.strategy.scope():
self.model = self.model_creator(self.config)
# For use in model.evaluate()
self.local_model = None
def step(self):
"""Runs a training epoch and updates the model parameters."""
fit_default_config = {"verbose": self.verbose}
fit_default_config.update(self.config.get("fit_config", {}))
history = self.model.fit(self.train_dataset, **fit_default_config)
if history is None:
stats = {}
else:
stats = {"train_" + k: v[-1] for k, v in history.history.items()}
self.epoch += 1
return stats
def validate(self):
"""Evaluates the model on the validation data set."""
stats = {}
evaluate_config = {"verbose": self.verbose}
evaluate_config.update(self.config.get("evaluate_config", {}))
results = self.model.evaluate(self.test_dataset, **evaluate_config)
if results is None:
# Using local Model since model.evaluate() returns None
# for MultiWorkerMirroredStrategy
logger.warning("Running a local model to get validation score.")
self.local_model = self.model_creator(self.config)
self.local_model.set_weights(self.model.get_weights())
results = self.local_model.evaluate(self.test_dataset,
**evaluate_config)
if isinstance(results, list):
stats = {
"validation_" + k: v
for k, v in zip(self.model.metrics_names, results)
}
else:
stats = {"loss": results}
return stats
def get_state(self):
"""Returns the state of the runner."""
return {
"epoch": self.epoch,
"weights": self.model.get_weights(),
"optimizer_weights": self.model.optimizer.get_weights()
}
def set_state(self, state):
"""Sets the state of the model."""
self.model = self.model_creator(self.config)
self.epoch = state["epoch"]
self.model.set_weights(state["weights"])
# This part is due to ray.get() changing scalar np.int64 object to int
state["optimizer_weights"][0] = np.array(
state["optimizer_weights"][0], dtype=np.int64)
if self.model.optimizer.weights == []:
self.model._make_train_function()
self.model.optimizer.set_weights(state["optimizer_weights"])
def shutdown(self):
"""Attempts to shut down the worker."""
del self.model
del self.train_dataset
del self.test_dataset
def get_node_ip(self):
"""Returns the IP address of the current node."""
return ray.services.get_node_ip_address()
def find_free_port(self):
"""Finds a free port on the current node."""
return utils.find_free_port()
| [
"[email protected]"
] | |
6e3f6c3449d7f93848d1116b344a0dcabece60f2 | 6cf86e6122b3c65853231481ff73d40a25374eb1 | /Input().py | 068cf37daec039fb7b1eb52c376b116f2b161ace | [] | no_license | npc203/hackerrank-one-liners | d9480ce71cde342458689250963d1f69f3a38093 | a7eb66c8d1bfa3508cae28ff6160db2728df3b5b | refs/heads/main | 2023-07-17T12:03:03.757519 | 2021-08-20T12:30:18 | 2021-08-20T12:30:48 | 375,340,120 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | (lambda x,b : print(eval(input()) == b))(*map(int,input().split())) | [
"[email protected]"
] | |
114b4c677ac9ad89733f6b700813000b37a0f4b4 | 7f7fc72cf2f2f06ef7eb5d852d0bd2caf3f2daf9 | /sirepo/runner.py | 6b4f1b20aa57674dc9faee958a025c976818c02e | [
"Apache-2.0"
] | permissive | kalebswartz7/sirepo | 4bcd41113ba93a3f7bcfa47df27e79805e1e4f50 | 8d1f2b3914cf9622eaae6b0bf32e23e38e4e5972 | refs/heads/master | 2020-03-19T08:31:41.409642 | 2018-07-20T19:32:48 | 2018-07-20T19:32:48 | 136,211,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,268 | py | # -*- coding: utf-8 -*-
u"""Run jobs
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
decouple so can start any type of job
add is_background_import to simulation_db
select docker for that if configured and not background
need to have hard constraints on the docker container
runner.init_job() does the dispatch
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcli
from pykern import pkcollections
from pykern import pkconfig
from pykern import pkio
from pykern import pkjinja
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
from sirepo import simulation_db
from sirepo.template import template_common
import aenum
import errno
import os
import pwd
import signal
import subprocess
import sys
import threading
import time
import uuid
#: Configuration
cfg = None
# Map of jid to instance
_job_map = pkcollections.Dict()
_job_map_lock = threading.RLock()
class State(aenum.UniqueEnum):
INIT = 1
START = 2
KILL = 3
RUN = 4
STOP = 5
# how long to wait before assuming thread that created
# job is dead.
_INIT_TOO_LONG_SECS = 5
# time expected between created and running
_DOCKER_CREATED_TOO_LONG_SECS = _INIT_TOO_LONG_SECS
# how long to wait after first kill (TERM) to second kill (KILL)
_KILL_TIMEOUT_SECS = 3
# prefix all report names
_DOCKER_CONTAINER_PREFIX = 'srjob-'
_MAX_OPEN_FILES = 1024
@pkconfig.parse_none
def cfg_job_class(value):
"""Return job queue class based on name
Args:
value (object): May be class or str.
Returns:
object: `Background` or `Celery` class.
"""
if isinstance(value, type) and issubclass(value, (Celery, Background)):
# Already initialized but may call initializer with original object
return value
if value == 'Celery':
if pkconfig.channel_in('dev'):
_assert_celery()
return Celery
elif value == 'Docker':
return Docker
elif value == 'Background':
signal.signal(signal.SIGCHLD, Background._sigchld_handler)
return Background
elif value is None:
return None
else:
raise AssertionError('{}: unknown job_class'.format(value))
def init(app, uwsgi):
"""Initialize module"""
if cfg.job_class is None:
from sirepo import server
d = 'Background'
if server.cfg.job_queue:
# Handle deprecated case
d = server.cfg.job_queue
cfg.job_class = cfg_job_class(d)
assert not uwsgi or not issubclass(cfg.job_class, Background), \
'uwsgi does not work if sirepo.runner.cfg.job_class=Background'
def job_is_processing(jid):
with _job_map_lock:
try:
job = _job_map[jid]
except KeyError:
return False
return job.is_processing()
def job_kill(jid):
"""Terminate job
Args:
jid (str): see `simulation_db.job_id`
"""
with _job_map_lock:
try:
job = _job_map[jid]
except KeyError:
return
job.kill()
def job_race_condition_reap(jid):
return job_kill(jid)
def job_start(data):
with _job_map_lock:
jid = simulation_db.job_id(data)
if jid in _job_map:
#TODO(robnagler) assumes external check of is_processing,
# which server._simulation_run_status does do, but this
# could be cleaner. Really want a reliable daemon thread
# to manage all this.
raise Collision(jid)
job = cfg.job_class(jid, data)
_job_map[jid] = job
job.start()
class Base(object):
"""Super of all job classes"""
def __init__(self, jid, data):
self.data = data
self.jid = jid
self.lock = threading.RLock()
self.set_state(State.INIT)
def is_processing(self):
with self.lock:
if self.state == State.RUN:
if self._is_processing():
return True
elif self.state == State.INIT:
if time.time() < self.state_changed + _INIT_TOO_LONG_SECS:
return True
else:
assert self.state in (State.START, State.KILL, State.STOP), \
'{}: invalid state for jid='.format(self.state, self.jid)
# reap the process in a non-running state
self.kill()
return False
def kill(self):
with self.lock:
if self.state in (State.RUN, State.START, State.KILL):
# normal case (RUN) or thread died while trying to kill job
self._kill()
elif not self.state in (State.INIT, State.STOP):
raise AssertionError(
'{}: invalid state for jid='.format(self.state, self.jid),
)
self.set_state(State.STOP)
with _job_map_lock:
try:
if self == _job_map[self.jid]:
del _job_map[self.jid]
except KeyError:
# stopped and no longer in map
return
def set_state(self, state):
self.state = state
self.state_changed = time.time()
def start(self):
with self.lock:
if self.state == State.STOP:
# Something killed between INIT and START so don't start
return
elif self.state in (State.KILL, State.RUN):
# normal case (RUN) or race condition on start/kill
# with a thread that died while trying to kill this
# job before it was started. Have to finish the KILL.
self.kill()
return
else:
# race condition that doesn't seem possible
assert self.state == State.INIT, \
'{}: unexpected state for jid={}'.format(self.state, self.jid)
self.set_state(State.START)
self.cmd, self.run_dir = simulation_db.prepare_simulation(self.data)
self._start()
self.set_state(State.RUN)
class Background(Base):
"""Run as subprocess"""
def _is_processing(self):
try:
os.kill(self.pid, 0)
except OSError:
self.pid = 0
return False
return True
def _kill(self):
if self.pid == 0:
return
pid = self.pid
for sig in (signal.SIGTERM, signal.SIGKILL):
try:
pkdlog('{}: kill {} pid={}', self.jid, sig, self.pid)
os.kill(self.pid, sig)
for j in range(_KILL_TIMEOUT_SECS):
time.sleep(1)
pid, status = os.waitpid(self.pid, os.WNOHANG)
if pid != 0:
break
else:
continue
if pid == self.pid:
pkdlog('{}: waitpid: status={}', pid, status)
self.pid = 0
break
else:
pkdlog(
'pid={} status={}: unexpected waitpid result; job={} pid={}',
pid,
status,
self.jid,
self.pid,
)
except OSError as e:
if not e.errno in (errno.ESRCH, errno.ECHILD):
raise
# reaped by _sigchld_handler()
return
@classmethod
def _sigchld_handler(cls, signum=None, frame=None):
try:
with _job_map_lock:
if not _job_map:
# Can't be our job so don't waitpid.
# Only important at startup, when other modules
# are doing popens, which does a waitpid.
# see radiasoft/sirepo#681
return
pid, status = os.waitpid(-1, os.WNOHANG)
if pid == 0:
# a process that was reaped before sigchld called
return
for self in _job_map.values():
# state of 'pid' is unknown since outside self.lock
if isinstance(self, Background) and getattr(self, 'pid', 0) == pid:
pkdlog('{}: waitpid pid={} status={}', self.jid, pid, status)
break
else:
pkdlog('pid={} status={}: unexpected waitpid', pid, status)
return
with self.lock:
self.pid = 0
self.kill()
except OSError as e:
if not e.errno in (errno,ESRCH, errno.ECHILD):
pkdlog('waitpid: OSError: {} errno={}', e.strerror, e.errno)
def _start(self):
"""Detach a process from the controlling terminal and run it in the
background as a daemon.
We don't use pksubprocess. This method is not called from the MainThread
so can't set signals.
"""
try:
pid = os.fork()
except OSError as e:
pkdlog('{}: fork OSError: {} errno={}', self.jid, e.strerror, e.errno)
reraise
if pid != 0:
pkdlog('{}: started: pid={} cmd={}', self.jid, pid, self.cmd)
self.pid = pid
return
try:
os.chdir(str(self.run_dir))
#Don't os.setsid() so signals propagate properly
import resource
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = _MAX_OPEN_FILES
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError:
pass
sys.stdin = open(template_common.RUN_LOG, 'a+')
assert sys.stdin.fileno() == 0
os.dup2(0, 1)
sys.stdout = os.fdopen(1, 'a+')
os.dup2(0, 2)
sys.stderr = os.fdopen(2, 'a+')
pkdlog('{}: child will exec: {}', self.jid, self.cmd)
sys.stderr.flush()
try:
simulation_db.write_status('running', self.run_dir)
os.execvp(self.cmd[0], self.cmd)
finally:
pkdlog('{}: execvp error: {} errno={}', self.jid, e.strerror, e.errno)
sys.exit(1)
except BaseException as e:
with open(str(self.run_dir.join(template_common.RUN_LOG)), 'a') as f:
f.write('{}: error starting simulation: {}'.format(self.jid, e))
raise
class Celery(Base):
"""Run job in Celery (prod)"""
def _is_processing(self):
"""Job is either in the queue or running"""
res = getattr(self, 'async_result', None)
return res and not res.ready()
def _kill(self):
from celery.exceptions import TimeoutError
if not self._is_processing():
return False
res = self.async_result
tid = getattr(res, 'task_id', None)
pkdlog('{}: kill SIGTERM tid={}', self.jid, tid)
try:
res.revoke(terminate=True, wait=True, timeout=_KILL_TIMEOUT_SECS, signal='SIGTERM')
except TimeoutError as e:
pkdlog('{}: kill SIGKILL tid={}', self.jid, tid)
res.revoke(terminate=True, signal='SIGKILL')
def _start(self):
"""Detach a process from the controlling terminal and run it in the
background as a daemon.
"""
from sirepo import celery_tasks
self.celery_queue = simulation_db.celery_queue(self.data)
self.async_result = celery_tasks.start_simulation.apply_async(
args=[self.cmd, str(self.run_dir)],
queue=self.celery_queue,
)
pkdc(
'{}: started tid={} dir={} queue={} len_jobs={}',
self.jid,
self.async_result.task_id,
self.run_dir,
self.celery_queue,
len(_job_map),
)
class Collision(Exception):
"""Avoid using a mutex"""
pass
class Docker(Base):
"""Run a code in docker"""
def _is_processing(self):
"""Inspect container to see if still in running state"""
out = self.__docker(['inspect', '--format={{.State.Status}}', self.cid])
if not out:
self.cid = None
return False
if out == 'running':
return True
if out == 'created':
return time.time() < self.state_changed + _DOCKER_CREATED_TOO_LONG_SECS
return False
def _kill(self):
if self.cid:
pkdlog('{}: stop cid={}', self.jid, self.cid)
self.__docker(['stop', '--time={}'.format(_KILL_TIMEOUT_SECS), self.cid])
self.cid = None
def _start(self):
"""Detach a process from the controlling terminal and run it in the
background as a daemon.
"""
#POSIT: jid is valid docker name (word chars and dash)
self.cname = _DOCKER_CONTAINER_PREFIX + self.jid
ctx = pkcollections.Dict(
kill_secs=_KILL_TIMEOUT_SECS,
run_dir=self.run_dir,
run_log=self.run_dir.join(template_common.RUN_LOG),
run_secs=self.__run_secs(),
sh_cmd=self.__sh_cmd(),
)
script = str(self.run_dir.join(_DOCKER_CONTAINER_PREFIX + 'run.sh'))
with open(str(script), 'wb') as f:
f.write(pkjinja.render_resource('runner/docker.sh', ctx))
cmd = [
'run',
#TODO(robnagler) configurable
'--cpus=1',
'--detach',
'--init',
'--log-driver=json-file',
# never should be large, just for output of the monitor
'--log-opt=max-size=1m',
'--memory=1g',
'--name=' + self.cname,
'--network=none',
'--rm',
'--ulimit=core=0',
#TODO(robnagler) this doesn't do anything
# '--ulimit=cpu=1',
'--ulimit=nofile={}'.format(_MAX_OPEN_FILES),
'--user=' + pwd.getpwuid(os.getuid()).pw_name,
] + self.__volumes() + [
#TODO(robnagler) make this configurable per code (would be structured)
self.__image(),
'bash',
script,
]
self.cid = self.__docker(cmd)
pkdc(
'{}: started cname={} cid={} dir={} len_jobs={} cmd={}',
self.jid,
self.cname,
self.cid,
self.run_dir,
len(_job_map),
' '.join(cmd),
)
def __docker(self, cmd):
cmd = ['docker'] + cmd
try:
pkdc('Running: {}', ' '.join(cmd))
return subprocess.check_output(
cmd,
stdin=open(os.devnull),
stderr=subprocess.STDOUT,
).rstrip()
except subprocess.CalledProcessError as e:
pkdlog('{}: failed: exit={} output={}', cmd, e.returncode, e.output)
return None
def __image(self):
res = cfg.docker_image
if ':' in res:
return res
return res + ':' + pkconfig.cfg.channel
def __run_secs(self):
if self.data['report'] == 'backgroundImport':
return cfg.import_secs
if simulation_db.is_parallel(self.data):
return cfg.parallel_secs
return cfg.sequential_secs
def __sh_cmd(self):
"""Convert ``self.cmd`` into a bash cmd"""
res = []
for c in self.cmd:
assert not "'" in c, \
'{}: sh_cmd contains a single quote'.format(cmd)
res.append("'{}'".format(c))
return ' '.join(res)
def __volumes(self):
res = []
def _res(src, tgt):
res.append('--volume={}:{}'.format(src, tgt))
if pkconfig.channel_in('dev'):
for v in '~/src', '~/.pyenv':
v = pkio.py_path('~/src')
# pyenv and src shouldn't be writable, only rundir
_res(v, v + ':ro')
_res(self.run_dir, self.run_dir)
return res
def _assert_celery():
"""Verify celery & rabbit are running"""
from sirepo import celery_tasks
import time
for x in range(10):
err = None
try:
if not celery_tasks.celery.control.ping():
err = 'You need to start Celery:\nsirepo service celery'
except Exception:
err = 'You need to start Rabbit:\nsirepo service rabbitmq'
# Rabbit doesn't have a long timeout, but celery ping does
time.sleep(.5)
if not err:
return
#TODO(robnagler) really should be pkconfig.Error() or something else
# but this prints a nice message. Don't call sys.exit, not nice
pkcli.command_error(err)
cfg = pkconfig.init(
docker_image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
import_secs=(10, int, 'maximum runtime of backgroundImport'),
# default is set in init(), because of server.cfg.job_gueue
job_class=(None, cfg_job_class, 'how to run jobs: Celery or Background'),
parallel_secs=(3600, int, 'maximum runtime of serial job'),
sequential_secs=(300, int, 'maximum runtime of serial job'),
)
| [
"[email protected]"
] | |
b1fc4028b00d66db57ef3d4fca7602a0b3de1815 | 8eb0f65096f9a9fe90a88c85dcdcaf12f9a8a512 | /apps/maintenance_mode/middleware.py | cd0e09b3e8aba28e3af198050f46e5958f5de4a4 | [
"MIT"
] | permissive | WhitespaceCrew/django-htk | 57c8cc9ec30b4cd9511b717978758c47144de76f | 6a7b87a3d0a2e4cb51f6b8059708a26fa8e613df | refs/heads/master | 2020-12-31T01:10:14.900413 | 2016-02-03T19:24:27 | 2016-02-03T19:25:02 | 45,211,442 | 0 | 0 | null | 2015-10-29T21:23:54 | 2015-10-29T21:23:54 | null | UTF-8 | Python | false | false | 873 | py | from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from htk.apps.maintenance_mode.utils import is_maintenance_mode
from htk.utils import htk_setting
class MaintenanceModeMiddleware(object):
"""Checks whether HTK_MAINTENANCE_MODE is set
If so, redirects to the HTK_MAINTENANCE_MODE_URL_NAME page
"""
def process_request(self, request):
maintenance_mode_page = reverse(htk_setting('HTK_MAINTENANCE_MODE_URL_NAME'))
response = None
if request.path == maintenance_mode_page:
if not is_maintenance_mode():
response = redirect('/')
else:
# already here
pass
else:
if is_maintenance_mode():
response = redirect(maintenance_mode_page)
else:
pass
return response
| [
"[email protected]"
] | |
b6b7520917496dbd41f7f57d11d8d68f84434ff7 | ee179dd9e9b24046508b11a60612da3758c7e122 | /lib/python2.7/site-packages/nltk/stem/api.py | c6032423e84a9a5b8a1985afcf341e4084970792 | [] | no_license | buhtigexa/Nerit | fcd6cb08a0935e5b80392ae2acf68ba52ee8a899 | d55629f6289c1fa6efe60802a78b79932ff248a2 | refs/heads/master | 2021-01-21T13:11:51.105930 | 2015-05-01T23:56:02 | 2015-05-01T23:56:02 | 34,728,820 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | # Natural Language Toolkit: Stemmer Interface
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Trevor Cohn <[email protected]>
# Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
class StemmerI(object):
"""
A processing interface for removing morphological affixes from
words. This process is known as stemming.
"""
def stem(self, token):
"""
Strip affixes from the token and return the stem.
:param token: The token that should be stemmed.
:type token: str
"""
raise NotImplementedError()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| [
"[email protected]"
] | |
37cd36176891ea926eef36e5b677f6b4352ae940 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-as/huaweicloudsdkas/v1/model/pause_scaling_group_request.py | c88cd3d690fe6f2fc4753b1834d1bd3b10a9f9f9 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,079 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PauseScalingGroupRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_group_id': 'str',
'body': 'PauseScalingGroupOption'
}
attribute_map = {
'scaling_group_id': 'scaling_group_id',
'body': 'body'
}
def __init__(self, scaling_group_id=None, body=None):
"""PauseScalingGroupRequest
The model defined in huaweicloud sdk
:param scaling_group_id: 伸缩组ID
:type scaling_group_id: str
:param body: Body of the PauseScalingGroupRequest
:type body: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
self._scaling_group_id = None
self._body = None
self.discriminator = None
self.scaling_group_id = scaling_group_id
if body is not None:
self.body = body
@property
def scaling_group_id(self):
"""Gets the scaling_group_id of this PauseScalingGroupRequest.
伸缩组ID
:return: The scaling_group_id of this PauseScalingGroupRequest.
:rtype: str
"""
return self._scaling_group_id
@scaling_group_id.setter
def scaling_group_id(self, scaling_group_id):
"""Sets the scaling_group_id of this PauseScalingGroupRequest.
伸缩组ID
:param scaling_group_id: The scaling_group_id of this PauseScalingGroupRequest.
:type scaling_group_id: str
"""
self._scaling_group_id = scaling_group_id
@property
def body(self):
"""Gets the body of this PauseScalingGroupRequest.
:return: The body of this PauseScalingGroupRequest.
:rtype: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this PauseScalingGroupRequest.
:param body: The body of this PauseScalingGroupRequest.
:type body: :class:`huaweicloudsdkas.v1.PauseScalingGroupOption`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PauseScalingGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
42ee0b0d809863a628c4d9a10375863e7328db4a | fb54704d4a6f9475f42b85d8c470e3425b37dcae | /medium/ex46.py | b8f578eefedb0af0bc3a15588f48718e85d76ec0 | [] | no_license | ziyuan-shen/leetcode_algorithm_python_solution | b2784071a94b04e687fd536b57e8d5a9ec1a4c05 | 920b65db80031fad45d495431eda8d3fb4ef06e5 | refs/heads/master | 2021-06-27T05:19:47.774044 | 2021-02-04T09:47:30 | 2021-02-04T09:47:30 | 210,991,299 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
ans = {(nums[i],): nums[:i] + nums[i+1:] for i in range(len(nums))}
for _ in range(len(nums)-1):
for permute in list(ans):
remaining = ans[permute]
for i in range(len(remaining)):
ans[permute+(remaining[i],)] = remaining[:i] + remaining[i+1:]
ans.pop(permute)
return [list(permute) for permute in ans] | [
"[email protected]"
] | |
37434a2d02bf51c411162c56fe9eda123ad980d9 | bede13ba6e7f8c2750815df29bb2217228e91ca5 | /advance_cash_flow_statements/wizard/account_account.py | 8ab4d6059149ffc32c123a592816f6a73772185a | [] | no_license | CybroOdoo/CybroAddons | f44c1c43df1aad348409924603e538aa3abc7319 | 4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14 | refs/heads/16.0 | 2023-09-01T17:52:04.418982 | 2023-09-01T11:43:47 | 2023-09-01T11:43:47 | 47,947,919 | 209 | 561 | null | 2023-09-14T01:47:59 | 2015-12-14T02:38:57 | HTML | UTF-8 | Python | false | false | 3,562 | py | # -*- coding: utf-8 -*-
#############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
#
# Copyright (C) 2019-TODAY Cybrosys Technologies(<https://www.cybrosys.com>)
# Author: Cybrosys Techno Solutions(<https://www.cybrosys.com>)
#
# You can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from odoo import api, models, fields
from odoo.tools.misc import get_lang
class AccountCommonReport(models.Model):
_inherit = "account.report"
_description = "Account Common Report"
company_id = fields.Many2one('res.company', string='Company', required=True,
readonly=True,
default=lambda self: self.env.company)
journal_ids = fields.Many2many(
comodel_name='account.journal',
string='Journals',
required=True,
default=lambda self: self.env['account.journal'].search(
[('company_id', '=', self.company_id.id)]),
domain="[('company_id', '=', company_id)]",
)
date_from = fields.Date(string='Start Date')
date_to = fields.Date(string='End Date')
target_move = fields.Selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], string='Target Moves', required=True,
default='posted')
@api.onchange('company_id')
def _onchange_company_id(self):
if self.company_id:
self.journal_ids = self.env['account.journal'].search(
[('company_id', '=', self.company_id.id)])
else:
self.journal_ids = self.env['account.journal'].search([])
def _build_contexts(self, data):
result = {}
result['journal_ids'] = 'journal_ids' in data['form'] and data['form'][
'journal_ids'] or False
result['state'] = 'target_move' in data['form'] and data['form'][
'target_move'] or ''
result['date_from'] = data['form']['date_from'] or False
result['date_to'] = data['form']['date_to'] or False
result['strict_range'] = True if result['date_from'] else False
result['company_id'] = data['form']['company_id'][0] or False
return result
def _print_report(self, data):
raise NotImplementedError()
def check_report(self):
print('Account.report')
self.ensure_one()
data = {'ids': self.env.context.get('active_ids', []),
'model': self.env.context.get('active_model', 'ir.ui.menu'),
'form': self.read(
['date_from', 'date_to', 'journal_ids', 'target_move',
'company_id'])[0]}
used_context = self._build_contexts(data)
data['form']['used_context'] = dict(used_context,
lang=get_lang(self.env).code)
return self.with_context(discard_logo_check=True)._print_report(data)
| [
"[email protected]"
] | |
79180c09bcb81b56e6d9d1043b6380e55871d2a0 | c7e765a9bed33d3bfb21774e3995bf4a09e04add | /adminmgr/media/code/A3/task1/BD_135_703_2371_KhgNwL4.py | 39a4a494197832cb4b20798bc47cbace9f61a4d5 | [
"Apache-2.0"
] | permissive | IamMayankThakur/test-bigdata | 13dd2ac7fb76c9baed6c3a0aa943057a22e2d237 | 7f507918c7bec31c92eedcd94491a83486623049 | refs/heads/master | 2022-05-03T00:59:44.127494 | 2022-02-10T19:50:16 | 2022-02-10T19:50:16 | 201,585,028 | 10 | 4 | Apache-2.0 | 2022-04-22T23:39:45 | 2019-08-10T05:34:09 | Python | UTF-8 | Python | false | false | 1,134 | py | import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def func(rdd):
sorted_rdd1 = rdd.sortBy(lambda x: (-x[1],x[0]))
sorted_rdd=sorted_rdd1.filter(lambda y: y[0] !='')
s_list=sorted_rdd.collect()
if(s_list!=[]):
print(s_list[0][0],s_list[1][0],s_list[2][0],s_list[3][0],s_list[4][0],sep=",")
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,int(sys.argv[2]))
ssc.checkpoint("/checkpoint_BIGDATA")
dataStream=ssc.socketTextStream("localhost",9009)
hashtag1=dataStream.window(int(sys.argv[1]),1)
if(',' in hashtag1.select(lambda w: w.split(";")[7])):
hashtag2=hashtag1.select(lambda w: w.split(";")[7])
hashtag3=hashtag2.flatmap(lambda p:p.split(","))
else:
hashtag3=hashtag1.flatmap(lambda w: w.split(";")[7])
hashtag4 = hashtag3.map(lambda x: (x,1))
#hashtags=hashtag4.reduceByKey(add)
hashtags=hashtag4.updateStateByKey(lambda x,y:int(x)+int(y))
hashtags.foreachRDD(func)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
| [
"[email protected]"
] | |
71ff48d27a98e522cb1183c1508f3fd16ee598fa | 521a5abf021aff0e5bec6e4723efb2d95bc1c528 | /dva/urls.py | f5d1f059a314c2edc9fa63fd0894759abd496b16 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | CVML/DeepVideoAnalytics | be3ed41968a140328e25c22f2cb2be431a2b172d | d0969b503ed68dc9ee26279c341e1540bfefd4f6 | refs/heads/master | 2021-07-17T22:19:20.787228 | 2017-10-22T07:55:04 | 2017-10-22T07:55:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | """dva URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
admin.autodiscover()
urlpatterns = [url(r'^admin/', admin.site.urls),
url(r'^api/', include('dvaapp.urls')),
url(r'', include('dvaui.urls'))]+\
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DVA_PRIVATE_ENABLE:
urlpatterns.append(url(r'^apps/', include('dvap.urls')))
if settings.DEBUG and settings.MACOS:
import debug_toolbar
urlpatterns = [url(r'^__debug__/', include(debug_toolbar.urls)),] + urlpatterns
| [
"[email protected]"
] | |
c5383493a1f9677eb1111b85946e9ad9e14fe2b8 | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/f3_wrong_hints/scaling_nonlinear_software/10-19_35.py | 6ae8d393dc099f7aac05202aa5a431adcbaf9881 | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,238 | py | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc = Location(env, mgr.LE(z, i_0))
loc.set_progress(0, mgr.Equals(x_z, z))
h_z = Hint("h_z0", env, frozenset([z]), symbs)
h_z.set_locs([loc])
res.append(h_z)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc2 = Location(env, mgr.GE(y, i_2))
loc2.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)),
stutterT=stutter)
loc.set_progress(0, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_2)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.GE(x_z, mgr.Plus(z, y)))
loc1 = Location(env, mgr.GE(z, i_3), mgr.GE(x, i_0))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, i_0)))
h_z = Hint("h_z2", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_pc, i_3))
loc1 = Location(env, mgr.Equals(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_1))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
return frozenset(res)
| [
"[email protected]"
] | |
e9ab407ab62a078acf59f7b0df91d74002198a3b | 76800454958c36c26d828ee989f181990813955f | /tutorial/HungyiLee/rnn/preprocess.py | 327adbf18ae8258660063e2fe4c3dfa221fd5d67 | [] | no_license | JuneXia/proml | 374b27b1d1c2d983aae4fed3141a2864a7196a1b | fbe86564013e7556c30d98c702c1ba6251f21851 | refs/heads/master | 2022-12-27T23:38:16.838639 | 2020-10-11T10:05:32 | 2020-10-11T10:05:32 | 267,207,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,405 | py | # preprocess.py
# 這個 block 用來做 data 的預處理
import torch
from torch import nn
from gensim.models import Word2Vec
class Preprocess():
def __init__(self, sentences, sen_len, w2v_path="./w2v.model"):
self.w2v_path = w2v_path
self.sentences = sentences
self.sen_len = sen_len
self.idx2word = []
self.word2idx = {}
self.embedding_matrix = []
def get_w2v_model(self):
# 把之前訓練好的 word to vec 模型讀進來
self.embedding = Word2Vec.load(self.w2v_path)
self.embedding_dim = self.embedding.vector_size
def add_embedding(self, word):
# 把 word 加進 embedding,並賦予他一個隨機生成的 representation vector
# word 只會是 "<PAD>" 或 "<UNK>"
vector = torch.empty(1, self.embedding_dim)
torch.nn.init.uniform_(vector)
self.word2idx[word] = len(self.word2idx)
self.idx2word.append(word)
self.embedding_matrix = torch.cat([self.embedding_matrix, vector], 0)
def make_embedding(self, load=True):
print("Get embedding ...")
# 取得訓練好的 Word2vec word embedding
if load:
print("loading word to vec model ...")
self.get_w2v_model()
else:
raise NotImplementedError
# 製作一個 word2idx 的 dictionary
# 製作一個 idx2word 的 list
# 製作一個 word2vector 的 list
for i, word in enumerate(self.embedding.wv.vocab):
print('get words #{}'.format(i+1), end='\r')
#e.g. self.word2index['he'] = 1
#e.g. self.index2word[1] = 'he'
#e.g. self.vectors[1] = 'he' vector
self.word2idx[word] = len(self.word2idx)
self.idx2word.append(word)
self.embedding_matrix.append(self.embedding[word])
print('')
self.embedding_matrix = torch.tensor(self.embedding_matrix)
# 將 "<PAD>" 跟 "<UNK>" 加進 embedding 裡面
self.add_embedding("<PAD>")
self.add_embedding("<UNK>")
print("total words: {}".format(len(self.embedding_matrix)))
return self.embedding_matrix
def pad_sequence(self, sentence):
# 將每個句子變成一樣的長度
if len(sentence) > self.sen_len:
sentence = sentence[:self.sen_len]
else:
pad_len = self.sen_len - len(sentence)
for _ in range(pad_len):
sentence.append(self.word2idx["<PAD>"])
assert len(sentence) == self.sen_len
return sentence
def sentence_word2idx(self):
# 把句子裡面的字轉成相對應的 index
sentence_list = []
for i, sen in enumerate(self.sentences):
print('sentence count #{}'.format(i+1), end='\r')
sentence_idx = []
for word in sen:
if (word in self.word2idx.keys()):
sentence_idx.append(self.word2idx[word])
else:
sentence_idx.append(self.word2idx["<UNK>"])
# 將每個句子變成一樣的長度
sentence_idx = self.pad_sequence(sentence_idx)
sentence_list.append(sentence_idx)
return torch.LongTensor(sentence_list)
def labels_to_tensor(self, y):
# 把 labels 轉成 tensor
y = [int(label) for label in y]
return torch.LongTensor(y) | [
"[email protected]"
] | |
9649970d62f951be36ba3cde0f8017e23aa5c6c1 | 7db6c1865cf9102808824ff06cda747b6e572a21 | /Python/Test/Locust/Test/wsh_loadtest.py | 53219d5e5215e6e3cee8591f2b5756445323a944 | [] | no_license | hyteer/testing | 1f6cabc1d2b67faa4533e6ad7eb5be8c13d542c9 | 1d8b47b3bbb2daf00e4f15b5d18e86111ea4e113 | refs/heads/master | 2020-05-21T16:19:08.243676 | 2017-01-03T01:25:17 | 2017-01-03T01:25:17 | 60,914,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | from locust import HttpLocust, TaskSet, task
import json
counter = 0
class UserBehavior(TaskSet):
#global counter
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.login()
def login(self):
res = self.client.post("/login/login-ajax", {"username":"20160912", "password":"123456","captcha":"1"})
print "Response status code:", res.status_code
print "Response content:", res.content
if res:
self.count_test()
def count_test(self):
global counter
counter += 1
print "counter:%d" % counter
@task(2)
def index(self):
res = self.client.get("/")
if res:
self.count_test()
@task(1)
def member_list(self):
res = self.client.post("/member/list-ajax", {"_page":1,"_page_size":20,"nickname":"","group_id":None,"shop_sub_id":"","agent_id":"","is_search":False,"belong_to_staff_id":"","createStart":"","createEnd":"","group_ids":[],"yestoday":False,"user_platform":0,"tags":[]})
content = json.loads(res.content)
errmsg = content["errmsg"]
errcode = content["errcode"]
print "errcode:%s,\nerrmsg:%s" % (errcode,str(errmsg))
self.count_test()
#print "Response status code:", res.status_code
#print "Response content:", res.content
@task(1)
def second_kill_list(self):
res = self.client.get("/second-kill/list")
if res:
self.count_test()
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 6000
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.